prompt (string, 1.74k-34.3k chars) | ref (string, 4-432 chars) |
---|---|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sherwinbahmani/4dfy
# Path: threestudio/models/background/base.py
class BaseBackground(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
def configure(self):
pass
def forward(self, dirs: Float[Tensor, "*B 3"]) -> Float[Tensor, "*B 3"]:
raise NotImplementedError
# Path: threestudio/models/geometry/base.py
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = False
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
self.cfg.isosurface_resolution
).to(self.device)
elif self.cfg.isosurface_method == "mt":
self.isosurface_helper = MarchingTetrahedraHelper(
self.cfg.isosurface_resolution,
f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
).to(self.device)
else:
raise AttributeError(
"Unknown isosurface method {self.cfg.isosurface_method}"
)
def forward(
self, points: Float[Tensor, "*N Di"], output_normal: bool = False
) -> Dict[str, Float[Tensor, "..."]]:
raise NotImplementedError
def forward_field(
self, points: Float[Tensor, "*N Di"]
) -> Tuple[Float[Tensor, "*N 1"], Optional[Float[Tensor, "*N 3"]]]:
# return the value of the implicit field, could be density / signed distance
# also return a deformation field if the grid vertices can be optimized
raise NotImplementedError
def forward_level(
self, field: Float[Tensor, "*N 1"], threshold: float
) -> Float[Tensor, "*N 1"]:
# return the value of the implicit field, where the zero level set represents the surface
raise NotImplementedError
def _isosurface(self, bbox: Float[Tensor, "2 3"], fine_stage: bool = False) -> Mesh:
def batch_func(x):
# scale to bbox as the input vertices are in [0, 1]
field, deformation = self.forward_field(
scale_tensor(
x.to(bbox.device), self.isosurface_helper.points_range, bbox
),
)
field = field.to(
x.device
) # move to the same device as the input (could be CPU)
if deformation is not None:
deformation = deformation.to(x.device)
return field, deformation
assert self.isosurface_helper is not None
field, deformation = chunk_batch(
batch_func,
self.cfg.isosurface_chunk,
self.isosurface_helper.grid_vertices,
)
threshold: float
if isinstance(self.cfg.isosurface_threshold, float):
threshold = self.cfg.isosurface_threshold
elif self.cfg.isosurface_threshold == "auto":
eps = 1.0e-5
threshold = field[field > eps].mean().item()
threestudio.info(
f"Automatically determined isosurface threshold: {threshold}"
)
else:
raise TypeError(
f"Unknown isosurface_threshold {self.cfg.isosurface_threshold}"
)
level = self.forward_level(field, threshold)
mesh: Mesh = self.isosurface_helper(level, deformation=deformation)
mesh.v_pos = scale_tensor(
mesh.v_pos, self.isosurface_helper.points_range, bbox
) # scale to bbox as the grid vertices are in [0, 1]
mesh.add_extra("bbox", bbox)
if self.cfg.isosurface_remove_outliers:
# remove outliers components with small number of faces
# only enabled when the mesh is not differentiable
mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)
return mesh
def isosurface(self) -> Mesh:
if not self.cfg.isosurface:
raise NotImplementedError(
"Isosurface is not enabled in the current configuration"
)
self._initilize_isosurface_helper()
if self.cfg.isosurface_coarse_to_fine:
threestudio.debug("First run isosurface to get a tight bounding box ...")
with torch.no_grad():
mesh_coarse = self._isosurface(self.bbox)
vmin, vmax = mesh_coarse.v_pos.amin(dim=0), mesh_coarse.v_pos.amax(dim=0)
vmin_ = (vmin - (vmax - vmin) * 0.1).max(self.bbox[0])
vmax_ = (vmax + (vmax - vmin) * 0.1).min(self.bbox[1])
threestudio.debug("Run isosurface again with the tight bounding box ...")
mesh = self._isosurface(torch.stack([vmin_, vmax_], dim=0), fine_stage=True)
else:
mesh = self._isosurface(self.bbox)
return mesh
# Path: threestudio/models/materials/base.py
class BaseMaterial(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
requires_normal: bool = False
def configure(self):
pass
def forward(self, *args, **kwargs) -> Float[Tensor, "*B 3"]:
raise NotImplementedError
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
# Path: threestudio/models/renderers/base.py
class VolumeRenderer(Renderer):
pass
# Path: threestudio/utils/ops.py
def chunk_batch(func: Callable, chunk_size: int, *args, **kwargs) -> Any:
if chunk_size <= 0:
return func(*args, **kwargs)
B = None
for arg in list(args) + list(kwargs.values()):
if isinstance(arg, torch.Tensor):
B = arg.shape[0]
break
assert (
B is not None
), "No tensor found in args or kwargs, cannot determine batch size."
out = defaultdict(list)
out_type = None
for i in range(0, B, chunk_size):
out_chunk = func(
*[
arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg
for arg in args
],
**{
k: arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg
for k, arg in kwargs.items()
},
)
if out_chunk is None:
continue
out_type = type(out_chunk)
if isinstance(out_chunk, torch.Tensor):
out_chunk = {0: out_chunk}
elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):
chunk_length = len(out_chunk)
out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}
elif isinstance(out_chunk, dict):
pass
else:
print(
f"Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}."
)
exit(1)
for k, v in out_chunk.items():
v = v if torch.is_grad_enabled() else v.detach()
out[k].append(v)
if out_type is None:
return None
out_merged: Dict[Any, Optional[torch.Tensor]] = {}
for k, v in out.items():
if all([vv is None for vv in v]):
# allow None in return value
out_merged[k] = None
elif all([isinstance(vv, torch.Tensor) for vv in v]):
out_merged[k] = torch.cat(v, dim=0)
else:
raise TypeError(
f"Unsupported types in return value of func: {[type(vv) for vv in v if not isinstance(vv, torch.Tensor)]}"
)
if out_type is torch.Tensor:
return out_merged[0]
elif out_type in [tuple, list]:
return out_type([out_merged[i] for i in range(chunk_length)])
elif out_type is dict:
return out_merged
# Path: threestudio/models/renderers/nerf_volume_renderer.py
from dataclasses import dataclass
from threestudio.models.background.base import BaseBackground
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.renderers.base import VolumeRenderer
from threestudio.utils.ops import chunk_batch
from threestudio.utils.typing import *
import nerfacc
import torch
import torch.nn.functional as F
import threestudio
weights_, _, _ = nerfacc.render_weight_from_density(
t_starts[..., 0],
t_ends[..., 0],
geo_out["density"][..., 0],
ray_indices=ray_indices,
n_rays=n_rays,
)
weights = weights_[..., None]
opacity: Float[Tensor, "Nr 1"] = nerfacc.accumulate_along_rays(
weights[..., 0], values=None, ray_indices=ray_indices, n_rays=n_rays
)
depth: Float[Tensor, "Nr 1"] = nerfacc.accumulate_along_rays(
weights[..., 0], values=t_positions, ray_indices=ray_indices, n_rays=n_rays
)
comp_rgb_fg: Float[Tensor, "Nr Nc"] = nerfacc.accumulate_along_rays(
weights[..., 0], values=rgb_fg_all, ray_indices=ray_indices, n_rays=n_rays
)
# populate depth and opacity to each point
t_depth = depth[ray_indices]
z_variance = nerfacc.accumulate_along_rays(
weights[..., 0],
values=(t_positions - t_depth) ** 2,
ray_indices=ray_indices,
n_rays=n_rays,
)
comp_rgb = comp_rgb_fg + comp_rgb_bg * (1.0 - opacity)
out = {
"comp_rgb": comp_rgb.view(batch_size, height, width, -1),
"comp_rgb_fg": comp_rgb_fg.view(batch_size, height, width, -1),
"comp_rgb_bg": comp_rgb_bg.view(batch_size, height, width, -1),
"opacity": opacity.view(batch_size, height, width, 1),
"depth": depth.view(batch_size, height, width, 1),
"z_variance": z_variance.view(batch_size, height, width, 1),
}
if self.training:
out.update(
{
"weights": weights,
"t_points": t_positions,
"t_intervals": t_intervals,
"t_dirs": t_dirs,
"ray_indices": ray_indices,
"points": positions,
**geo_out,
}
)
if "normal" in geo_out:
if self.cfg.return_comp_normal:
comp_normal: Float[Tensor, "Nr 3"] = nerfacc.accumulate_along_rays(
weights[..., 0],
values=geo_out["normal"],
ray_indices=ray_indices,
n_rays=n_rays,
)
comp_normal = F.normalize(comp_normal, dim=-1)
comp_normal = (
(comp_normal + 1.0) / 2.0 * opacity
) # for visualization
out.update(
{
"comp_normal": comp_normal.view(
batch_size, height, width, 3
),
}
)
if self.cfg.return_normal_perturb:
normal_perturb = self.geometry(
positions + torch.randn_like(positions) * 1e-2,
output_normal=self.material.requires_normal,
)["normal"]
out.update({"normal_perturb": normal_perturb})
else:
if "normal" in geo_out:
comp_normal = nerfacc.accumulate_along_rays(
weights[..., 0],
values=geo_out["normal"],
ray_indices=ray_indices,
n_rays=n_rays,
)
comp_normal = F.normalize(comp_normal, dim=-1)
comp_normal = (comp_normal + 1.0) / 2.0 * opacity # for visualization
out.update(
{
"comp_normal": comp_normal.view(batch_size, height, width, 3),
}
)
return out
def update_step(
self, epoch: int, global_step: int, on_load_weights: bool = False
) -> None:
def occ_eval_fn(x):
# Query random time for encoding
encoding = self.geometry.encoding.encoding
dynamic = not encoding.static
if dynamic:
frame_time = encoding.frame_time
encoding.frame_time = torch.rand(1).item()
encoding.update_occ_grid = True
density = self.geometry.forward_density(x)
if dynamic:
encoding.frame_time = frame_time
encoding.update_occ_grid = False
# approximate for 1 - torch.exp(-density * self.render_step_size) based on taylor series
return density * self.render_step_size
if self.training and not on_load_weights:
self.estimator.update_every_n_steps(
step=global_step, occ_eval_fn=occ_eval_fn
)
def train(self, mode=True):
self.randomized = mode and self.cfg.randomized
return super().train(mode=mode)
def eval(self):
self.randomized = False
| return super().eval() |
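The reference line above completes the renderer's train()/eval() toggle. A minimal, self-contained sketch of that pattern (illustrative only; `ToyRenderer` and `cfg_randomized` are made-up stand-ins, not part of the dataset row) is:

```python
import torch.nn as nn

class ToyRenderer(nn.Module):
    def __init__(self, randomized: bool = True):
        super().__init__()
        self.cfg_randomized = randomized  # stands in for self.cfg.randomized
        self.randomized = randomized

    def train(self, mode: bool = True):
        # enable stochastic sampling only while training
        self.randomized = mode and self.cfg_randomized
        return super().train(mode=mode)

    def eval(self):
        # deterministic sampling at evaluation time
        self.randomized = False
        return super().eval()

renderer = ToyRenderer()
renderer.eval()
print(renderer.randomized, renderer.training)  # False False
```

Because `nn.Module.eval()` simply calls `train(False)`, overriding both methods as above keeps the stochastic-sampling flag consistent on either path.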
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: rlawjdghek/StableVITON
# Path: ldm/modules/diffusionmodules/model.py
class Encoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
**ignore_kwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,)+tuple(ch_mult)
self.in_ch_mult = in_ch_mult
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
2*z_channels if double_z else z_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
# timestep embedding
temb = None
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions-1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
# Path: ldm/modules/diffusionmodules/model.py
class Decoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
attn_type="vanilla", **ignorekwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.give_pre_end = give_pre_end
self.tanh_out = tanh_out
# compute in_ch_mult, block_in and curr_res at lowest res
in_ch_mult = (1,)+tuple(ch_mult)
block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res)
print("Working with z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)))
# z to block_in
self.conv_in = torch.nn.Conv2d(z_channels,
block_in,
kernel_size=3,
stride=1,
padding=1)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
def forward(self, z):
#assert z.shape[1:] == self.z_shape[1:]
self.last_z_shape = z.shape
# timestep embedding
temb = None
# z to block_in
h = self.conv_in(z)
# middle
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
h = self.up[i_level].block[i_block](h, temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
if self.give_pre_end:
return h
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
if self.tanh_out:
h = torch.tanh(h)
return h
# Path: ldm/modules/distributions/distributions.py
class DiagonalGaussianDistribution(object):
def __init__(self, parameters, deterministic=False):
self.parameters = parameters
self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar)
self.var = torch.exp(self.logvar)
if self.deterministic:
self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
def sample(self):
x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
return x
def kl(self, other=None):
if self.deterministic:
return torch.Tensor([0.])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean, 2)
+ self.var - 1.0 - self.logvar,
dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean, 2) / other.var
+ self.var / other.var - 1.0 - self.logvar + other.logvar,
dim=[1, 2, 3])
def nll(self, sample, dims=[1,2,3]):
if self.deterministic:
return torch.Tensor([0.])
logtwopi = np.log(2.0 * np.pi)
return 0.5 * torch.sum(
logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
dim=dims)
def mode(self):
return self.mean
# Path: ldm/util.py
def instantiate_from_config(config):
if not "target" in config:
if config == '__is_first_stage__':
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
# Path: ldm/modules/ema.py
class LitEma(nn.Module):
def __init__(self, model, decay=0.9999, use_num_upates=True):
super().__init__()
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.m_name2s_name = {}
self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
else torch.tensor(-1, dtype=torch.int))
for name, p in model.named_parameters():
if p.requires_grad:
# remove as '.'-character is not allowed in buffers
s_name = name.replace('.', '')
self.m_name2s_name.update({name: s_name})
self.register_buffer(s_name, p.clone().detach().data)
self.collected_params = []
def reset_num_updates(self):
del self.num_updates
self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))
def forward(self, model):
decay = self.decay
if self.num_updates >= 0:
self.num_updates += 1
decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
sname = self.m_name2s_name[key]
shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
else:
assert not key in self.m_name2s_name
def copy_to(self, model):
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
else:
assert not key in self.m_name2s_name
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
# Path: ldm/models/autoencoder.py
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
from ldm.util import instantiate_from_config
from ldm.modules.ema import LitEma
class AutoencoderKL(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
ema_decay=None,
learn_logvar=False
):
super().__init__()
self.lossconfig = lossconfig
self.learn_logvar = learn_logvar
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
self.decoder = Decoder(**ddconfig)
self.loss = torch.nn.Identity()
assert ddconfig["double_z"]
self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
self.embed_dim = embed_dim
if colorize_nlabels is not None:
assert type(colorize_nlabels)==int
self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
if monitor is not None:
self.monitor = monitor
self.use_ema = ema_decay is not None
if self.use_ema:
self.ema_decay = ema_decay
assert 0. < ema_decay < 1.
self.model_ema = LitEma(self, decay=ema_decay)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
def init_loss(self):
self.loss = instantiate_from_config(self.lossconfig)
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location="cpu")["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
self.load_state_dict(sd, strict=False)
print(f"Restored from {path}")
@contextmanager
def ema_scope(self, context=None):
if self.use_ema:
self.model_ema.store(self.parameters())
self.model_ema.copy_to(self)
if context is not None:
print(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.parameters())
if context is not None:
print(f"{context}: Restored training weights")
def on_train_batch_end(self, *args, **kwargs):
if self.use_ema:
self.model_ema(self)
def encode(self, x):
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
return posterior
def decode(self, z):
z = self.post_quant_conv(z)
| dec = self.decoder(z) |
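For context, the row above exercises AutoencoderKL's encode/decode path. A rough, self-contained sketch of that round trip (stand-in Conv2d layers and toy sizes; the real Encoder/Decoder and DiagonalGaussianDistribution are far richer) is:

```python
import torch
import torch.nn as nn

embed_dim, z_channels = 4, 4
encoder = nn.Conv2d(3, 2 * z_channels, 3, padding=1)      # stand-in for Encoder
quant_conv = nn.Conv2d(2 * z_channels, 2 * embed_dim, 1)
post_quant_conv = nn.Conv2d(embed_dim, z_channels, 1)
decoder = nn.Conv2d(z_channels, 3, 3, padding=1)          # stand-in for Decoder

x = torch.randn(1, 3, 32, 32)
moments = quant_conv(encoder(x))                          # mirrors encode()
mean, logvar = torch.chunk(moments, 2, dim=1)             # diagonal Gaussian split
z = mean + torch.exp(0.5 * logvar.clamp(-30.0, 20.0)) * torch.randn_like(mean)
dec = decoder(post_quant_conv(z))                         # mirrors decode()
print(dec.shape)                                          # torch.Size([1, 3, 32, 32])
```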
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AIFSH/NativeSpeaker
# Path: src/third_part/facelib/detection/align_trans.py
def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False):
"""
Function:
----------
get reference 5 key points according to crop settings:
0. Set default crop_size:
if default_square:
crop_size = (112, 112)
else:
crop_size = (96, 112)
1. Pad the crop_size by inner_padding_factor in each side;
2. Resize crop_size into (output_size - outer_padding*2),
pad into output_size with outer_padding;
3. Output reference_5point;
Parameters:
----------
@output_size: (w, h) or None
size of aligned face image
@inner_padding_factor: (w_factor, h_factor)
padding factor for inner (w, h)
@outer_padding: (w_pad, h_pad)
each row is a pair of coordinates (x, y)
@default_square: True or False
if True:
default crop_size = (112, 112)
else:
default crop_size = (96, 112);
!!! make sure, if output_size is not None:
(output_size - outer_padding)
= some_scale * (default crop_size * (1.0 +
inner_padding_factor))
Returns:
----------
@reference_5point: 5x2 np.array
each row is a pair of transformed coordinates (x, y)
"""
tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
tmp_crop_size = np.array(DEFAULT_CROP_SIZE)
# 0) make the inner region a square
if default_square:
size_diff = max(tmp_crop_size) - tmp_crop_size
tmp_5pts += size_diff / 2
tmp_crop_size += size_diff
if (output_size and output_size[0] == tmp_crop_size[0] and output_size[1] == tmp_crop_size[1]):
return tmp_5pts
if (inner_padding_factor == 0 and outer_padding == (0, 0)):
if output_size is None:
return tmp_5pts
else:
raise FaceWarpException('No paddings to do, output_size must be None or {}'.format(tmp_crop_size))
# check output size
if not (0 <= inner_padding_factor <= 1.0):
raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')
if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and output_size is None):
output_size = tmp_crop_size * \
(1 + inner_padding_factor * 2).astype(np.int32)
output_size += np.array(outer_padding)
if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]):
raise FaceWarpException('Not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1])')
# 1) pad the inner region according inner_padding_factor
if inner_padding_factor > 0:
size_diff = tmp_crop_size * inner_padding_factor * 2
tmp_5pts += size_diff / 2
tmp_crop_size += np.round(size_diff).astype(np.int32)
# 2) resize the padded inner region
size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2
if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
raise FaceWarpException('Must have (output_size - outer_padding)'
'= some_scale * (crop_size * (1.0 + inner_padding_factor)')
scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
tmp_5pts = tmp_5pts * scale_factor
# size_diff = tmp_crop_size * (scale_factor - min(scale_factor))
# tmp_5pts = tmp_5pts + size_diff / 2
tmp_crop_size = size_bf_outer_pad
# 3) add outer_padding to make output_size
reference_5point = tmp_5pts + np.array(outer_padding)
tmp_crop_size = output_size
return reference_5point
# Path: src/third_part/facelib/detection/align_trans.py
def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='smilarity'):
"""
Function:
----------
apply affine transform 'trans' to uv
Parameters:
----------
@src_img: 3x3 np.array
input image
@facial_pts: could be
1)a list of K coordinates (x,y)
or
2) Kx2 or 2xK np.array
each row or col is a pair of coordinates (x, y)
@reference_pts: could be
1) a list of K coordinates (x,y)
or
2) Kx2 or 2xK np.array
each row or col is a pair of coordinates (x, y)
or
3) None
if None, use default reference facial points
@crop_size: (w, h)
output face image size
@align_type: transform type, could be one of
1) 'similarity': use similarity transform
2) 'cv2_affine': use the first 3 points to do affine transform,
by calling cv2.getAffineTransform()
3) 'affine': use all points to do affine transform
Returns:
----------
@face_img: output face image with size (w, h) = @crop_size
"""
if reference_pts is None:
if crop_size[0] == 96 and crop_size[1] == 112:
reference_pts = REFERENCE_FACIAL_POINTS
else:
default_square = False
inner_padding_factor = 0
outer_padding = (0, 0)
output_size = crop_size
reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding,
default_square)
ref_pts = np.float32(reference_pts)
ref_pts_shp = ref_pts.shape
if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2')
if ref_pts_shp[0] == 2:
ref_pts = ref_pts.T
src_pts = np.float32(facial_pts)
src_pts_shp = src_pts.shape
if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2')
if src_pts_shp[0] == 2:
src_pts = src_pts.T
if src_pts.shape != ref_pts.shape:
raise FaceWarpException('facial_pts and reference_pts must have the same shape')
if align_type == 'cv2_affine':
tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
elif align_type == 'affine':
tfm = get_affine_transform_matrix(src_pts, ref_pts)
else:
tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)
face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))
return face_img
# Path: src/third_part/facelib/detection/retinaface/retinaface_net.py
class FPN(nn.Module):
def __init__(self, in_channels_list, out_channels):
super(FPN, self).__init__()
leaky = 0
if (out_channels <= 64):
leaky = 0.1
self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)
self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)
self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)
self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)
self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)
def forward(self, input):
# names = list(input.keys())
# input = list(input.values())
output1 = self.output1(input[0])
output2 = self.output2(input[1])
output3 = self.output3(input[2])
up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode='nearest')
output2 = output2 + up3
output2 = self.merge2(output2)
up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode='nearest')
output1 = output1 + up2
output1 = self.merge1(output1)
out = [output1, output2, output3]
return out
# Path: src/third_part/facelib/detection/retinaface/retinaface_net.py
class SSH(nn.Module):
def __init__(self, in_channel, out_channel):
super(SSH, self).__init__()
assert out_channel % 4 == 0
leaky = 0
if (out_channel <= 64):
leaky = 0.1
self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1)
self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky)
self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)
self.conv7X7_2 = conv_bn(out_channel // 4, out_channel // 4, stride=1, leaky=leaky)
self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)
def forward(self, input):
conv3X3 = self.conv3X3(input)
conv5X5_1 = self.conv5X5_1(input)
conv5X5 = self.conv5X5_2(conv5X5_1)
conv7X7_2 = self.conv7X7_2(conv5X5_1)
conv7X7 = self.conv7x7_3(conv7X7_2)
out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
out = F.relu(out)
return out
# Path: src/third_part/facelib/detection/retinaface/retinaface_net.py
class MobileNetV1(nn.Module):
def __init__(self):
super(MobileNetV1, self).__init__()
self.stage1 = nn.Sequential(
conv_bn(3, 8, 2, leaky=0.1), # 3
conv_dw(8, 16, 1), # 7
conv_dw(16, 32, 2), # 11
conv_dw(32, 32, 1), # 19
conv_dw(32, 64, 2), # 27
conv_dw(64, 64, 1), # 43
)
self.stage2 = nn.Sequential(
conv_dw(64, 128, 2), # 43 + 16 = 59
conv_dw(128, 128, 1), # 59 + 32 = 91
conv_dw(128, 128, 1), # 91 + 32 = 123
conv_dw(128, 128, 1), # 123 + 32 = 155
conv_dw(128, 128, 1), # 155 + 32 = 187
conv_dw(128, 128, 1), # 187 + 32 = 219
)
self.stage3 = nn.Sequential(
conv_dw(128, 256, 2), # 219 +3 2 = 241
conv_dw(256, 256, 1), # 241 + 64 = 301
)
self.avg = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(256, 1000)
def forward(self, x):
x = self.stage1(x)
x = self.stage2(x)
x = self.stage3(x)
x = self.avg(x)
# x = self.model(x)
x = x.view(-1, 256)
x = self.fc(x)
return x
# Path: src/third_part/facelib/detection/retinaface/retinaface_net.py
def make_bbox_head(fpn_num=3, inchannels=64, anchor_num=2):
bboxhead = nn.ModuleList()
for i in range(fpn_num):
bboxhead.append(BboxHead(inchannels, anchor_num))
return bboxhead
# Path: src/third_part/facelib/detection/retinaface/retinaface_net.py
def make_class_head(fpn_num=3, inchannels=64, anchor_num=2):
classhead = nn.ModuleList()
for i in range(fpn_num):
classhead.append(ClassHead(inchannels, anchor_num))
return classhead
# Path: src/third_part/facelib/detection/retinaface/retinaface_net.py
def make_landmark_head(fpn_num=3, inchannels=64, anchor_num=2):
landmarkhead = nn.ModuleList()
for i in range(fpn_num):
landmarkhead.append(LandmarkHead(inchannels, anchor_num))
return landmarkhead
# Path: src/third_part/facelib/detection/retinaface/retinaface_utils.py
class PriorBox(object):
def __init__(self, cfg, image_size=None, phase='train'):
super(PriorBox, self).__init__()
self.min_sizes = cfg['min_sizes']
self.steps = cfg['steps']
self.clip = cfg['clip']
self.image_size = image_size
self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps]
self.name = 's'
def forward(self):
anchors = []
for k, f in enumerate(self.feature_maps):
min_sizes = self.min_sizes[k]
for i, j in product(range(f[0]), range(f[1])):
for min_size in min_sizes:
s_kx = min_size / self.image_size[1]
s_ky = min_size / self.image_size[0]
dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
for cy, cx in product(dense_cy, dense_cx):
anchors += [cx, cy, s_kx, s_ky]
# back to torch land
output = torch.Tensor(anchors).view(-1, 4)
if self.clip:
output.clamp_(max=1, min=0)
return output
# Path: src/third_part/facelib/detection/retinaface/retinaface_utils.py
def batched_decode(b_loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
b_loc (tensor): location predictions for loc layers,
Shape: [num_batches,num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [1,num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = (
priors[:, :, :2] + b_loc[:, :, :2] * variances[0] * priors[:, :, 2:],
priors[:, :, 2:] * torch.exp(b_loc[:, :, 2:] * variances[1]),
)
boxes = torch.cat(boxes, dim=2)
boxes[:, :, :2] -= boxes[:, :, 2:] / 2
boxes[:, :, 2:] += boxes[:, :, :2]
return boxes
# Path: src/third_part/facelib/detection/retinaface/retinaface_utils.py
def batched_decode_landm(pre, priors, variances):
"""Decode landm from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
pre (tensor): landm predictions for loc layers,
Shape: [num_batches,num_priors,10]
priors (tensor): Prior boxes in center-offset form.
Shape: [1,num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded landm predictions
"""
landms = (
priors[:, :, :2] + pre[:, :, :2] * variances[0] * priors[:, :, 2:],
priors[:, :, :2] + pre[:, :, 2:4] * variances[0] * priors[:, :, 2:],
priors[:, :, :2] + pre[:, :, 4:6] * variances[0] * priors[:, :, 2:],
priors[:, :, :2] + pre[:, :, 6:8] * variances[0] * priors[:, :, 2:],
priors[:, :, :2] + pre[:, :, 8:10] * variances[0] * priors[:, :, 2:],
)
landms = torch.cat(landms, dim=2)
return landms
# Path: src/third_part/facelib/detection/retinaface/retinaface_utils.py
def decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
# Path: src/third_part/facelib/detection/retinaface/retinaface_utils.py
def decode_landm(pre, priors, variances):
"""Decode landm from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
pre (tensor): landm predictions for loc layers,
Shape: [num_priors,10]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded landm predictions
"""
tmp = (
priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
)
landms = torch.cat(tmp, dim=1)
return landms
# Path: src/third_part/facelib/detection/retinaface/retinaface_utils.py
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
keep = torchvision.ops.nms(
boxes=torch.Tensor(dets[:, :4]),
scores=torch.Tensor(dets[:, 4]),
iou_threshold=thresh,
)
return list(keep)
# Path: src/third_part/facelib/detection/retinaface/retinaface.py
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
from torchvision.models._utils import IntermediateLayerGetter as IntermediateLayerGetter
from ..align_trans import get_reference_facial_points, warp_and_crop_face
from .retinaface_net import FPN, SSH, MobileNetV1, make_bbox_head, make_class_head, make_landmark_head
from .retinaface_utils import (PriorBox, batched_decode, batched_decode_landm, decode, decode_landm,
py_cpu_nms)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def generate_config(network_name):
cfg_mnet = {
'name': 'mobilenet0.25',
'min_sizes': [[16, 32], [64, 128], [256, 512]],
'steps': [8, 16, 32],
'variance': [0.1, 0.2],
'clip': False,
'loc_weight': 2.0,
'gpu_train': True,
'batch_size': 32,
'ngpu': 1,
'epoch': 250,
'decay1': 190,
'decay2': 220,
'image_size': 640,
'return_layers': {
'stage1': 1,
'stage2': 2,
'stage3': 3
},
'in_channel': 32,
'out_channel': 64
}
cfg_re50 = {
'name': 'Resnet50',
'min_sizes': [[16, 32], [64, 128], [256, 512]],
'steps': [8, 16, 32],
'variance': [0.1, 0.2],
'clip': False,
'loc_weight': 2.0,
'gpu_train': True,
'batch_size': 24,
'ngpu': 4,
'epoch': 100,
'decay1': 70,
'decay2': 90,
'image_size': 840,
'return_layers': {
'layer2': 1,
'layer3': 2,
'layer4': 3
},
'in_channel': 256,
'out_channel': 256
}
if network_name == 'mobile0.25':
return cfg_mnet
elif network_name == 'resnet50':
return cfg_re50
else:
raise NotImplementedError(f'network_name={network_name}')
class RetinaFace(nn.Module):
def __init__(self, network_name='resnet50', half=False, phase='test'):
super(RetinaFace, self).__init__()
self.half_inference = half
cfg = generate_config(network_name)
self.backbone = cfg['name']
self.model_name = f'retinaface_{network_name}'
self.cfg = cfg
self.phase = phase
self.target_size, self.max_size = 1600, 2150
self.resize, self.scale, self.scale1 = 1., None, None
self.mean_tensor = torch.tensor([[[[104.]], [[117.]], [[123.]]]]).to(device)
self.reference = get_reference_facial_points(default_square=True)
# Build network.
backbone = None
if cfg['name'] == 'mobilenet0.25':
backbone = MobileNetV1()
self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
elif cfg['name'] == 'Resnet50':
backbone = models.resnet50(pretrained=False)
self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
in_channels_stage2 = cfg['in_channel']
in_channels_list = [
| in_channels_stage2 * 2, |
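The reference line starts the FPN input-channel list. As a hedged illustration (only the first entry appears in the row above; the remaining two follow the usual RetinaFace convention of 2x/4x/8x the configured `in_channel`), the channel bookkeeping can be sketched as:

```python
cfg_mnet_in, cfg_re50_in = 32, 256     # 'in_channel' from the two configs above

def fpn_in_channels(in_channels_stage2):
    # stage outputs are 2x, 4x and 8x the configured base width
    return [in_channels_stage2 * 2,
            in_channels_stage2 * 4,
            in_channels_stage2 * 8]

print(fpn_in_channels(cfg_mnet_in))    # [64, 128, 256]   -> MobileNetV1 stage1-3 widths
print(fpn_in_channels(cfg_re50_in))    # [512, 1024, 2048] -> ResNet50 layer2-4 widths
```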
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: orhir/PoseAnything
# Path: models/models/backbones/swin_transformer.py
class SwinTransformer(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
fused_window_process (bool, optional): If True, use one kernel to fused window shift & window partition for acceleration, similar for the reversed part. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, fused_window_process=False, **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
fused_window_process=fused_window_process)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
# Path: models/models/backbones/swin_transformer_v2.py
class SwinTransformerV2(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
pretrained_window_sizes (tuple(int)): Pretrained window sizes of each layer.
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, pretrained_window_sizes=[0, 0, 0, 0],
multi_scale=False, upsample='deconv', **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
pretrained_window_size=pretrained_window_sizes[i_layer])
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.multi_scale = multi_scale
if self.multi_scale:
self.scales = [1, 2, 4, 4]
self.upsample = nn.ModuleList()
features = [int(embed_dim * 2 ** i) for i in range(1, self.num_layers)] + [self.num_features]
self.multi_scale_fuse = nn.Conv2d(sum(features), self.num_features, 1)
for i in range(self.num_layers):
self.upsample.append(nn.Upsample(scale_factor=self.scales[i]))
else:
if upsample == 'deconv':
self.upsample = nn.ConvTranspose2d(self.num_features, self.num_features, 2, stride=2)
elif upsample == 'new_deconv':
self.upsample = nn.Sequential(nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
nn.Conv2d(self.num_features, self.num_features, 3, stride=1, padding=1),
nn.BatchNorm2d(self.num_features),
nn.ReLU(inplace=True)
)
elif upsample == 'new_deconv2':
self.upsample = nn.Sequential(nn.Upsample(scale_factor=2),
nn.Conv2d(self.num_features, self.num_features, 3, stride=1, padding=1),
nn.BatchNorm2d(self.num_features),
nn.ReLU(inplace=True)
)
elif upsample == 'bilinear':
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
else:
self.upsample = nn.Identity()
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
for bly in self.layers:
bly._init_respostnorm()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {"cpb_mlp", "logit_scale", 'relative_position_bias_table'}
def forward_features(self, x):
B, C, H, W = x.shape
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
if self.multi_scale:
# x_2d = x.view(B, H // 4, W // 4, -1).permute(0, 3, 1, 2) # B C H W
# features = [self.upsample[0](x_2d)]
features = []
for i, layer in enumerate(self.layers):
x = layer(x)
x_2d = x.view(B, H // (8 * self.scales[i]), W // (8 * self.scales[i]), -1).permute(0, 3, 1,
2) # B C H W
features.append(self.upsample[i](x_2d))
x = torch.cat(features, dim=1)
x = self.multi_scale_fuse(x)
x = x.view(B, self.num_features, -1).permute(0, 2, 1)
x = self.norm(x) # B L C
x = x.view(B, H // 8, W // 8, self.num_features).permute(0, 3, 1, 2) # B C H W
else:
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = x.view(B, H // 32, W // 32, self.num_features).permute(0, 3, 1, 2) # B C H W
x = self.upsample(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
# Path: models/models/backbones/simmim.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_
from .swin_transformer import SwinTransformer
from .swin_transformer_v2 import SwinTransformerV2
# --------------------------------------------------------
# SimMIM
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Zhenda Xie
# --------------------------------------------------------
def norm_targets(targets, patch_size):
assert patch_size % 2 == 1
targets_ = targets
targets_count = torch.ones_like(targets)
targets_square = targets ** 2.
targets_mean = F.avg_pool2d(targets, kernel_size=patch_size, stride=1, padding=patch_size // 2,
count_include_pad=False)
targets_square_mean = F.avg_pool2d(targets_square, kernel_size=patch_size, stride=1, padding=patch_size // 2,
count_include_pad=False)
targets_count = F.avg_pool2d(targets_count, kernel_size=patch_size, stride=1, padding=patch_size // 2,
count_include_pad=True) * (patch_size ** 2)
targets_var = (targets_square_mean - targets_mean ** 2.) * (targets_count / (targets_count - 1))
targets_var = torch.clamp(targets_var, min=0.)
targets_ = (targets_ - targets_mean) / (targets_var + 1.e-6) ** 0.5
return targets_
class SwinTransformerForSimMIM(SwinTransformer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
assert self.num_classes == 0
self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
trunc_normal_(self.mask_token, mean=0., std=.02)
def forward(self, x, mask):
x = self.patch_embed(x)
assert mask is not None
B, L, _ = x.shape
mask_tokens = self.mask_token.expand(B, L, -1)
w = mask.flatten(1).unsqueeze(-1).type_as(mask_tokens)
x = x * (1. - w) + mask_tokens * w
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x)
x = x.transpose(1, 2)
B, C, L = x.shape
H = W = int(L ** 0.5)
x = x.reshape(B, C, H, W)
return x
@torch.jit.ignore
def no_weight_decay(self):
return super().no_weight_decay() | {'mask_token'}
class SwinTransformerV2ForSimMIM(SwinTransformerV2):
def __init__(self, **kwargs):
super().__init__(**kwargs)
assert self.num_classes == 0
self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
trunc_normal_(self.mask_token, mean=0., std=.02)
def forward(self, x, mask):
x = self.patch_embed(x)
assert mask is not None
B, L, _ = x.shape
mask_tokens = self.mask_token.expand(B, L, -1)
w = mask.flatten(1).unsqueeze(-1).type_as(mask_tokens)
x = x * (1. - w) + mask_tokens * w
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x)
x = x.transpose(1, 2)
B, C, L = x.shape
H = W = int(L ** 0.5)
x = x.reshape(B, C, H, W)
return x
@torch.jit.ignore
def no_weight_decay(self):
return super().no_weight_decay() | {'mask_token'}
class SimMIM(nn.Module):
def __init__(self, config, encoder, encoder_stride, in_chans, patch_size):
super().__init__()
self.config = config
self.encoder = encoder
| self.encoder_stride = encoder_stride |
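The row above stores the encoder and its stride on the SimMIM wrapper; the key step in both encoder variants shown is the mask-token substitution inside `forward()`. A small stand-alone sketch of that step (toy shapes and hypothetical variable names, not the repository code) is:

```python
import torch
import torch.nn as nn

B, L, C = 2, 16, 8                       # batch, num patches (4x4 grid), embed dim
x = torch.randn(B, L, C)                 # patch embeddings
mask = torch.randint(0, 2, (B, 4, 4))    # 1 = masked patch
mask_token = nn.Parameter(torch.zeros(1, 1, C))

mask_tokens = mask_token.expand(B, L, -1)
w = mask.flatten(1).unsqueeze(-1).type_as(mask_tokens)
x = x * (1.0 - w) + mask_tokens * w      # masked positions now carry the learned token
print(x.shape)                           # torch.Size([2, 16, 8])
```

The blend replaces only the masked patch embeddings, so the transformer layers see the learned token wherever reconstruction is required and the original embeddings everywhere else.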
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: VITA-Group/FSGS
# Path: scene/colmap_loader.py
def read_extrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
cameras = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
camera_id = int(elems[0])
model = elems[1]
assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE"
width = int(elems[2])
height = int(elems[3])
params = np.array(tuple(map(float, elems[4:])))
cameras[camera_id] = Camera(id=camera_id, model=model,
width=width, height=height,
params=params)
return cameras
# Path: scene/colmap_loader.py
def qvec2rotmat(qvec):
return np.array([
[1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
[2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
# Path: scene/colmap_loader.py
def rotmat2qvec(R):
Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
K = np.array([
[Rxx - Ryy - Rzz, 0, 0, 0],
[Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
[Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
[Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
eigvals, eigvecs = np.linalg.eigh(K)
qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
if qvec[0] < 0:
qvec *= -1
return qvec
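# Illustrative sketch (not from the repository): qvec2rotmat / rotmat2qvec round trip.
# COLMAP stores quaternions as (w, x, y, z); the identity quaternion maps to np.eye(3).
import numpy as np
q = np.array([1.0, 0.0, 0.0, 0.0])         # identity rotation
R = qvec2rotmat(q)
assert np.allclose(R, np.eye(3))
q_back = rotmat2qvec(R)
assert np.allclose(q_back, q)              # sign is normalised so that w >= 0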
# Path: scene/colmap_loader.py
def read_extrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
format_char_sequence="ddq"*num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::WriteCamerasBinary(const std::string& path)
void Reconstruction::ReadCamerasBinary(const std::string& path)
"""
cameras = {}
with open(path_to_model_file, "rb") as fid:
num_cameras = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_cameras):
camera_properties = read_next_bytes(
fid, num_bytes=24, format_char_sequence="iiQQ")
camera_id = camera_properties[0]
model_id = camera_properties[1]
model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
width = camera_properties[2]
height = camera_properties[3]
num_params = CAMERA_MODEL_IDS[model_id].num_params
params = read_next_bytes(fid, num_bytes=8*num_params,
format_char_sequence="d"*num_params)
cameras[camera_id] = Camera(id=camera_id,
model=model_name,
width=width,
height=height,
params=np.array(params))
assert len(cameras) == num_cameras
return cameras
# Path: scene/colmap_loader.py
def read_points3D_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
void Reconstruction::WritePoints3DBinary(const std::string& path)
"""
with open(path_to_model_file, "rb") as fid:
num_points = read_next_bytes(fid, 8, "Q")[0]
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
for p_id in range(num_points):
binary_point_line_properties = read_next_bytes(
fid, num_bytes=43, format_char_sequence="QdddBBBd")
xyz = np.array(binary_point_line_properties[1:4])
rgb = np.array(binary_point_line_properties[4:7])
error = np.array(binary_point_line_properties[7])
track_length = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q")[0]
track_elems = read_next_bytes(
fid, num_bytes=8*track_length,
format_char_sequence="ii"*track_length)
xyzs[p_id] = xyz
rgbs[p_id] = rgb
errors[p_id] = error
return xyzs, rgbs, errors
# Path: scene/colmap_loader.py
def read_points3D_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DText(const std::string& path)
void Reconstruction::WritePoints3DText(const std::string& path)
"""
xyzs = None
rgbs = None
errors = None
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
xyz = np.array(tuple(map(float, elems[1:4])))
rgb = np.array(tuple(map(int, elems[4:7])))
error = np.array(float(elems[7]))
if xyzs is None:
xyzs = xyz[None, ...]
rgbs = rgb[None, ...]
errors = error[None, ...]
else:
xyzs = np.append(xyzs, xyz[None, ...], axis=0)
rgbs = np.append(rgbs, rgb[None, ...], axis=0)
errors = np.append(errors, error[None, ...], axis=0)
return xyzs, rgbs, errors
# Path: utils/graphics_utils.py
def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
Rt = np.zeros((4, 4))
Rt[:3, :3] = R.transpose()
Rt[:3, 3] = t
Rt[3, 3] = 1.0
C2W = np.linalg.inv(Rt)
cam_center = C2W[:3, 3]
cam_center = (cam_center + translate) * scale
C2W[:3, 3] = cam_center
Rt = np.linalg.inv(C2W)
return np.float32(Rt)
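# Illustrative sketch (not from the repository): getWorld2View2 with an identity rotation
# and default translate/scale. The camera center recovered from the inverse matrix is
# -R @ t for this convention, i.e. the camera sits at world position -t here.
import numpy as np
R = np.eye(3)
t = np.array([0.0, 0.0, 2.0])
W2C = getWorld2View2(R, t)                 # 4x4 world-to-camera matrix
C2W = np.linalg.inv(W2C)
cam_center = C2W[:3, 3]
assert np.allclose(cam_center, -t)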
# Path: utils/graphics_utils.py
def focal2fov(focal, pixels):
return 2*math.atan(pixels/(2*focal))
# Path: utils/graphics_utils.py
def fov2focal(fov, pixels):
return pixels / (2 * math.tan(fov / 2))
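# Illustrative sketch (not from the repository): focal2fov and fov2focal are inverses;
# focal2fov returns the full field of view in radians for the given image extent.
import math
focal, pixels = 800.0, 1024
fov = focal2fov(focal, pixels)
assert math.isclose(fov2focal(fov, pixels), focal)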
# Path: utils/general_utils.py
def chamfer_dist(array1, array2):
dist = torch.norm(array1[None] - array2[:, None], 2, dim=-1)
return dist.min(1)[0]
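# Illustrative sketch (not from the repository): note the argument order — chamfer_dist
# returns, for every point in array2, the distance to its nearest neighbour in array1,
# so the output has length len(array2).
import torch
a = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
b = torch.tensor([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0]])
d = chamfer_dist(a, b)
assert torch.allclose(d, torch.tensor([0.0, 2.0]))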
# Path: utils/sh_utils.py
def SH2RGB(sh):
return sh * C0 + 0.5
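# Illustrative sketch (not from the repository): SH2RGB maps the degree-0 spherical
# harmonic (DC) coefficient to RGB. C0 is assumed to be 1 / (2 * sqrt(pi)) ~= 0.2821 as
# defined in utils/sh_utils.py, so a zero coefficient maps to mid-grey 0.5.
import numpy as np
assert np.allclose(SH2RGB(np.zeros(3)), 0.5)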
# Path: scene/gaussian_model.py
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
def __init__(self, args):
def capture(self):
def restore(self, model_args, training_args):
def get_scaling(self):
def get_rotation(self):
def get_xyz(self):
def get_features(self):
def get_opacity(self):
def get_covariance(self, scaling_modifier=1):
def oneupSHdegree(self):
def create_from_pcd(self, pcd: BasicPointCloud, spatial_lr_scale: float):
def training_setup(self, training_args):
def update_learning_rate(self, iteration):
def construct_list_of_attributes(self):
def save_ply(self, path):
def reset_opacity(self):
def load_ply(self, path):
def replace_tensor_to_optimizer(self, tensor, name):
def _prune_optimizer(self, mask):
def dist_prune(self):
def prune_points(self, mask, iter):
def cat_tensors_to_optimizer(self, tensors_dict):
def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling,
new_rotation):
def proximity(self, scene_extent, N = 3):
def densify_and_split(self, grads, grad_threshold, scene_extent, iter, N=2):
def densify_and_clone(self, grads, grad_threshold, scene_extent):
def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size, iter):
def add_densification_stats(self, viewspace_point_tensor, update_filter):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
# Path: scene/dataset_readers.py
import glob
import os
import sys
import matplotlib.pyplot as plt
import imageio
import numpy as np
import json
import cv2
import math
import torch
import open3d as o3d
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, rotmat2qvec, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from utils.general_utils import chamfer_dist
from tqdm import tqdm
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
mask: np.array
bounds: np.array
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras2(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
break
def normalize(x):
| return x / np.linalg.norm(x) |
====REPOSITORY====
# Repo Name: JiahuiLei/GART
# Path: lib_gart/smplx/smplx/lbs.py
def lbs(
betas: Tensor,
pose: Tensor,
v_template: Tensor,
shapedirs: Tensor,
posedirs: Tensor,
J_regressor: Tensor,
parents: Tensor,
lbs_weights: Tensor,
pose2rot: bool = True,
return_T: bool = False, # ! JH 2023.9 modified here
return_posed_v: bool = False, # ! JH 2023.9 modified here
return_A = False, # ! JH 2023.10 modified here
return_J = False, # ! JH 2023.10 modified here
) -> Tuple[Tensor, Tensor]:
''' Performs Linear Blend Skinning with the given shape and pose parameters
Parameters
----------
betas : torch.tensor BxNB
The tensor of shape parameters
pose : torch.tensor Bx(J + 1) * 3
The pose parameters in axis-angle format
v_template torch.tensor BxVx3
The template mesh that will be deformed
shapedirs : torch.tensor 1xNB
The tensor of PCA shape displacements
posedirs : torch.tensor Px(V * 3)
The pose PCA coefficients
J_regressor : torch.tensor JxV
The regressor array that is used to calculate the joints from
the position of the vertices
parents: torch.tensor J
The array that describes the kinematic tree for the model
lbs_weights: torch.tensor N x V x (J + 1)
The linear blend skinning weights that represent how much the
rotation matrix of each part affects each vertex
pose2rot: bool, optional
Flag on whether to convert the input pose tensor to rotation
matrices. The default value is True. If False, then the pose tensor
should already contain rotation matrices and have a size of
Bx(J + 1)x9
dtype: torch.dtype, optional
Returns
-------
verts: torch.tensor BxVx3
The vertices of the mesh after applying the shape and pose
displacements.
joints: torch.tensor BxJx3
The joints of the model
'''
batch_size = max(betas.shape[0], pose.shape[0])
device, dtype = betas.device, betas.dtype
# Add shape contribution
v_shaped = v_template + blend_shapes(betas, shapedirs)
# Get the joints
# NxJx3 array
J = vertices2joints(J_regressor, v_shaped)
# 3. Add pose blend shapes
# N x J x 3 x 3
ident = torch.eye(3, dtype=dtype, device=device)
if pose2rot:
rot_mats = batch_rodrigues(pose.view(-1, 3)).view(
[batch_size, -1, 3, 3])
pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
# (N x P) x (P, V * 3) -> N x V x 3
pose_offsets = torch.matmul(
pose_feature, posedirs).view(batch_size, -1, 3)
else:
pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
rot_mats = pose.view(batch_size, -1, 3, 3)
pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
posedirs).view(batch_size, -1, 3)
v_posed = pose_offsets + v_shaped
# 4. Get the global joint location
J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
# 5. Do skinning:
# W is N x V x (J + 1)
W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
# (N x V x (J + 1)) x (N x (J + 1) x 16)
num_joints = J_regressor.shape[0]
T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
.view(batch_size, -1, 4, 4)
homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
dtype=dtype, device=device)
v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
verts = v_homo[:, :, :3, 0]
ret_list = [verts, J_transformed]
if return_T:
ret_list.append(T)
if return_posed_v:
ret_list.append(v_posed)
if return_A:
ret_list.append(A)
if return_J:
ret_list.append(J)
return ret_list
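# Illustrative shape-check sketch (not from the repository; all tensors are random toy
# data): calling lbs for a 2-joint, 6-vertex model. Shapes follow the docstring above;
# with the return_* flags left at their defaults only [vertices, joints] is returned.
import torch
B, V, J, NB = 1, 6, 2, 10
betas = torch.zeros(B, NB)
pose = torch.zeros(B, J * 3)                        # axis-angle: root + 1 joint
v_template = torch.randn(V, 3)
shapedirs = torch.randn(V, 3, NB)
posedirs = torch.randn((J - 1) * 9, V * 3)
J_regressor = torch.rand(J, V)
parents = torch.tensor([-1, 0])                     # simple kinematic chain
lbs_weights = torch.softmax(torch.randn(V, J), dim=-1)
verts, joints = lbs(betas, pose, v_template, shapedirs, posedirs,
                    J_regressor, parents, lbs_weights)[:2]
assert verts.shape == (B, V, 3) and joints.shape == (B, J, 3)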
# Path: lib_gart/smplx/smplx/lbs.py
def vertices2landmarks(
vertices: Tensor,
faces: Tensor,
lmk_faces_idx: Tensor,
lmk_bary_coords: Tensor
) -> Tensor:
''' Calculates landmarks by barycentric interpolation
Parameters
----------
vertices: torch.tensor BxVx3, dtype = torch.float32
The tensor of input vertices
faces: torch.tensor Fx3, dtype = torch.long
The faces of the mesh
lmk_faces_idx: torch.tensor L, dtype = torch.long
The tensor with the indices of the faces used to calculate the
landmarks.
lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32
The tensor of barycentric coordinates that are used to interpolate
the landmarks
Returns
-------
landmarks: torch.tensor BxLx3, dtype = torch.float32
The coordinates of the landmarks for each mesh in the batch
'''
# Extract the indices of the vertices for each face
# BxLx3
batch_size, num_verts = vertices.shape[:2]
device = vertices.device
lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1).to(torch.long)).view(
batch_size, -1, 3)
#The '.to(torch.long)'.
# added to make the trace work in c++,
# otherwise you get a runtime error in c++:
# 'index_select(): Expected dtype int32 or int64 for index'
lmk_faces += torch.arange(
batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(
batch_size, -1, 3, 3)
landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
return landmarks
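# Illustrative sketch (not from the repository): barycentric interpolation of a single
# landmark from one triangle; equal weights recover the triangle centroid.
import torch
vertices = torch.tensor([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
                          [0.0, 1.0, 0.0], [5.0, 5.0, 5.0]]])   # (1, 4, 3)
faces = torch.tensor([[0, 1, 2]])                               # (1, 3)
lmk_faces_idx = torch.tensor([[0]])                             # use face 0
lmk_bary_coords = torch.full((1, 1, 3), 1.0 / 3.0)
lmk = vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords)
assert torch.allclose(lmk, torch.tensor([[[1.0 / 3.0, 1.0 / 3.0, 0.0]]]))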
# Path: lib_gart/smplx/smplx/lbs.py
def find_dynamic_lmk_idx_and_bcoords(
vertices: Tensor,
pose: Tensor,
dynamic_lmk_faces_idx: Tensor,
dynamic_lmk_b_coords: Tensor,
neck_kin_chain: List[int],
pose2rot: bool = True,
) -> Tuple[Tensor, Tensor]:
''' Compute the faces, barycentric coordinates for the dynamic landmarks
To do so, we first compute the rotation of the neck around the y-axis
and then use a pre-computed look-up table to find the faces and the
barycentric coordinates that will be used.
Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
for providing the original TensorFlow implementation and for the LUT.
Parameters
----------
vertices: torch.tensor BxVx3, dtype = torch.float32
The tensor of input vertices
pose: torch.tensor Bx(Jx3), dtype = torch.float32
The current pose of the body model
dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
The look-up table from neck rotation to faces
dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
The look-up table from neck rotation to barycentric coordinates
neck_kin_chain: list
A python list that contains the indices of the joints that form the
kinematic chain of the neck.
dtype: torch.dtype, optional
Returns
-------
dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
A tensor of size BxL that contains the indices of the faces that
will be used to compute the current dynamic landmarks.
dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
A tensor of size BxL that contains the indices of the faces that
will be used to compute the current dynamic landmarks.
'''
dtype = vertices.dtype
batch_size = vertices.shape[0]
if pose2rot:
aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
neck_kin_chain)
rot_mats = batch_rodrigues(
aa_pose.view(-1, 3)).view(batch_size, -1, 3, 3)
else:
rot_mats = torch.index_select(
pose.view(batch_size, -1, 3, 3), 1, neck_kin_chain)
rel_rot_mat = torch.eye(
3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0).repeat(
batch_size, 1, 1)
for idx in range(len(neck_kin_chain)):
rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
y_rot_angle = torch.round(
torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
max=39)).to(dtype=torch.long)
neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
mask = y_rot_angle.lt(-39).to(dtype=torch.long)
neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
y_rot_angle = (neg_mask * neg_vals +
(1 - neg_mask) * y_rot_angle)
dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
0, y_rot_angle)
dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
0, y_rot_angle)
return dyn_lmk_faces_idx, dyn_lmk_b_coords
# Path: lib_gart/smplx/smplx/lbs.py
def blend_shapes(betas: Tensor, shape_disps: Tensor) -> Tensor:
''' Calculates the per vertex displacement due to the blend shapes
Parameters
----------
betas : torch.tensor Bx(num_betas)
Blend shape coefficients
shape_disps: torch.tensor Vx3x(num_betas)
Blend shapes
Returns
-------
torch.tensor BxVx3
The per-vertex displacement due to shape deformation
'''
# Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
# i.e. Multiply each shape displacement by its corresponding beta and
# then sum them.
blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
return blend_shape
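# Illustrative sketch (not from the repository): blend_shapes is a single einsum; a
# one-hot beta vector simply selects the corresponding blend-shape displacement.
import torch
V, NB = 5, 3
shape_disps = torch.randn(V, 3, NB)
betas = torch.tensor([[0.0, 1.0, 0.0]])             # (1, NB) one-hot
offsets = blend_shapes(betas, shape_disps)          # (1, V, 3)
assert torch.allclose(offsets[0], shape_disps[..., 1])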
# Path: lib_gart/smplx/smplx/vertex_ids.py
# Path: lib_gart/smplx/smplx/utils.py
class ModelOutput:
class SMPLOutput(ModelOutput):
class SMPLHOutput(SMPLOutput):
class SMPLXOutput(SMPLHOutput):
class MANOOutput(ModelOutput):
class FLAMEOutput(ModelOutput):
class Struct(object):
def __getitem__(self, key):
def get(self, key, default=None):
def __iter__(self):
def keys(self):
def values(self):
def items(self):
def find_joint_kin_chain(joint_id, kinematic_tree):
def to_tensor(
array: Union[Array, Tensor], dtype=torch.float32
) -> Tensor:
def __init__(self, **kwargs):
def to_np(array, dtype=np.float32):
def rot_mat_to_euler(rot_mats):
T: Optional[Tensor] = None
A: Optional[Tensor] = None
J: Optional[Tensor] = None
# Path: lib_gart/smplx/smplx/vertex_joint_selector.py
class VertexJointSelector(nn.Module):
def __init__(self, vertex_ids=None,
use_hands=True,
use_feet_keypoints=True, **kwargs):
super(VertexJointSelector, self).__init__()
extra_joints_idxs = []
face_keyp_idxs = np.array([
vertex_ids['nose'],
vertex_ids['reye'],
vertex_ids['leye'],
vertex_ids['rear'],
vertex_ids['lear']], dtype=np.int64)
extra_joints_idxs = np.concatenate([extra_joints_idxs,
face_keyp_idxs])
if use_feet_keypoints:
feet_keyp_idxs = np.array([vertex_ids['LBigToe'],
vertex_ids['LSmallToe'],
vertex_ids['LHeel'],
vertex_ids['RBigToe'],
vertex_ids['RSmallToe'],
vertex_ids['RHeel']], dtype=np.int32)
extra_joints_idxs = np.concatenate(
[extra_joints_idxs, feet_keyp_idxs])
if use_hands:
self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky']
tips_idxs = []
for hand_id in ['l', 'r']:
for tip_name in self.tip_names:
tips_idxs.append(vertex_ids[hand_id + tip_name])
extra_joints_idxs = np.concatenate(
[extra_joints_idxs, tips_idxs])
self.register_buffer('extra_joints_idxs',
to_tensor(extra_joints_idxs, dtype=torch.long))
def forward(self, vertices, joints):
extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs.to(torch.long)) #The '.to(torch.long)'.
# added to make the trace work in c++,
# otherwise you get a runtime error in c++:
# 'index_select(): Expected dtype int32 or int64 for index'
joints = torch.cat([joints, extra_joints], dim=1)
return joints
# Path: lib_gart/smplx/smplx/body_models.py
from typing import Optional, Dict, Union
from .lbs import lbs, vertices2landmarks, find_dynamic_lmk_idx_and_bcoords, blend_shapes
from .vertex_ids import vertex_ids as VERTEX_IDS
from .utils import (
Struct,
to_np,
to_tensor,
Tensor,
Array,
SMPLOutput,
SMPLHOutput,
SMPLXOutput,
MANOOutput,
FLAMEOutput,
find_joint_kin_chain,
)
from .vertex_joint_selector import VertexJointSelector
from collections import namedtuple
import os
import os.path as osp
import pickle
import numpy as np
import torch
import torch.nn as nn
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de
TensorOutput = namedtuple(
"TensorOutput",
[
"vertices",
"joints",
"betas",
"expression",
"global_orient",
"body_pose",
"left_hand_pose",
"right_hand_pose",
"jaw_pose",
"transl",
"full_pose",
],
)
class SMPL(nn.Module):
NUM_JOINTS = 23
NUM_BODY_JOINTS = 23
SHAPE_SPACE_DIM = 300
def __init__(
self,
model_path: str,
kid_template_path: str = "",
data_struct: Optional[Struct] = None,
create_betas: bool = True,
betas: Optional[Tensor] = None,
num_betas: int = 10,
create_global_orient: bool = True,
global_orient: Optional[Tensor] = None,
create_body_pose: bool = True,
body_pose: Optional[Tensor] = None,
create_transl: bool = True,
transl: Optional[Tensor] = None,
dtype=torch.float32,
| batch_size: int = 1, |
====REPOSITORY====
# Repo Name: GongyeLiu/StyleCrafter
# Path: lvdm/modules/networks/ae_modules.py
class Encoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
**ignore_kwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,)+tuple(ch_mult)
self.in_ch_mult = in_ch_mult
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
2*z_channels if double_z else z_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
# timestep embedding
temb = None
# print(f'encoder-input={x.shape}')
# downsampling
hs = [self.conv_in(x)]
# print(f'encoder-conv in feat={hs[0].shape}')
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
# print(f'encoder-down feat={h.shape}')
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions-1:
# print(f'encoder-downsample (input)={hs[-1].shape}')
hs.append(self.down[i_level].downsample(hs[-1]))
# print(f'encoder-downsample (output)={hs[-1].shape}')
# middle
h = hs[-1]
h = self.mid.block_1(h, temb)
# print(f'encoder-mid1 feat={h.shape}')
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# print(f'encoder-mid2 feat={h.shape}')
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
# print(f'end feat={h.shape}')
return h
# Path: lvdm/modules/networks/ae_modules.py
class Decoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
attn_type="vanilla", **ignorekwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.give_pre_end = give_pre_end
self.tanh_out = tanh_out
# compute in_ch_mult, block_in and curr_res at lowest res
in_ch_mult = (1,)+tuple(ch_mult)
block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res)
print("AE working on z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)))
# z to block_in
self.conv_in = torch.nn.Conv2d(z_channels,
block_in,
kernel_size=3,
stride=1,
padding=1)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
def forward(self, z):
#assert z.shape[1:] == self.z_shape[1:]
self.last_z_shape = z.shape
# print(f'decoder-input={z.shape}')
# timestep embedding
temb = None
# z to block_in
h = self.conv_in(z)
# print(f'decoder-conv in feat={h.shape}')
# middle
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# print(f'decoder-mid feat={h.shape}')
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
h = self.up[i_level].block[i_block](h, temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
# print(f'decoder-up feat={h.shape}')
if i_level != 0:
h = self.up[i_level].upsample(h)
# print(f'decoder-upsample feat={h.shape}')
# end
if self.give_pre_end:
return h
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
# print(f'decoder-conv_out feat={h.shape}')
if self.tanh_out:
h = torch.tanh(h)
return h
# Path: lvdm/distributions.py
class DiagonalGaussianDistribution(object):
def __init__(self, parameters, deterministic=False):
self.parameters = parameters
self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar)
self.var = torch.exp(self.logvar)
if self.deterministic:
self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
def sample(self, noise=None):
if noise is None:
noise = torch.randn(self.mean.shape)
x = self.mean + self.std * noise.to(device=self.parameters.device)
return x
def kl(self, other=None):
if self.deterministic:
return torch.Tensor([0.])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean, 2)
+ self.var - 1.0 - self.logvar,
dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean, 2) / other.var
+ self.var / other.var - 1.0 - self.logvar + other.logvar,
dim=[1, 2, 3])
def nll(self, sample, dims=[1,2,3]):
if self.deterministic:
return torch.Tensor([0.])
logtwopi = np.log(2.0 * np.pi)
return 0.5 * torch.sum(
logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
dim=dims)
def mode(self):
return self.mean
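# Illustrative sketch (not from the repository): the posterior holds mean and log-variance
# stacked along the channel axis; mode() returns the mean and kl() reduces over C, H, W.
import torch
params = torch.cat([torch.zeros(2, 4, 8, 8), torch.zeros(2, 4, 8, 8)], dim=1)
posterior = DiagonalGaussianDistribution(params)
z = posterior.sample()                               # (2, 4, 8, 8), standard normal here
assert torch.allclose(posterior.mode(), torch.zeros(2, 4, 8, 8))
assert torch.allclose(posterior.kl(), torch.zeros(2))    # N(0, 1) vs N(0, 1)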
# Path: utils/utils.py
def instantiate_from_config(config):
if not "target" in config:
if config == '__is_first_stage__':
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
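# Illustrative sketch (not from the repository): instantiate_from_config resolves the
# dotted "target" path and forwards "params" as keyword arguments; the config below is
# an arbitrary example, not one used by the codebase.
cfg = {"target": "torch.nn.Linear", "params": {"in_features": 8, "out_features": 2}}
layer = instantiate_from_config(cfg)                 # equivalent to torch.nn.Linear(8, 2)
assert layer.weight.shape == (2, 8)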
# Path: lvdm/models/autoencoder.py
import os
import torch
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
from contextlib import contextmanager
from einops import rearrange
from lvdm.modules.networks.ae_modules import Encoder, Decoder
from lvdm.distributions import DiagonalGaussianDistribution
from utils.utils import instantiate_from_config
def encode(self, x, **kwargs):
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
return posterior
def decode(self, z, **kwargs):
z = self.post_quant_conv(z)
dec = self.decoder(z)
return dec
def forward(self, input, sample_posterior=True):
posterior = self.encode(input)
if sample_posterior:
z = posterior.sample()
else:
z = posterior.mode()
dec = self.decode(z)
return dec, posterior
def get_input(self, batch, k):
x = batch[k]
if x.dim() == 5 and self.input_dim == 4:
b,c,t,h,w = x.shape
self.b = b
self.t = t
x = rearrange(x, 'b c t h w -> (b t) c h w')
return x
def training_step(self, batch, batch_idx, optimizer_idx):
inputs = self.get_input(batch, self.image_key)
reconstructions, posterior = self(inputs)
if optimizer_idx == 0:
# train encoder+decoder+logvar
aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return aeloss
if optimizer_idx == 1:
# train the discriminator
discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return discloss
def validation_step(self, batch, batch_idx):
inputs = self.get_input(batch, self.image_key)
reconstructions, posterior = self(inputs)
aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
last_layer=self.get_last_layer(), split="val")
discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
last_layer=self.get_last_layer(), split="val")
self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def configure_optimizers(self):
lr = self.learning_rate
opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
list(self.decoder.parameters())+
list(self.quant_conv.parameters())+
list(self.post_quant_conv.parameters()),
lr=lr, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
lr=lr, betas=(0.5, 0.9))
return [opt_ae, opt_disc], []
def get_last_layer(self):
return self.decoder.conv_out.weight
@torch.no_grad()
def log_images(self, batch, only_inputs=False, **kwargs):
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
if not only_inputs:
xrec, posterior = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec.shape[1] > 3
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log["samples"] = self.decode(torch.randn_like(posterior.sample()))
log["reconstructions"] = xrec
log["inputs"] = x
return log
def to_rgb(self, x):
assert self.image_key == "segmentation"
if not hasattr(self, "colorize"):
self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
x = F.conv2d(x, weight=self.colorize)
x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
return x
class IdentityFirstStage(torch.nn.Module):
def __init__(self, *args, vq_interface=False, **kwargs):
self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff
super().__init__()
def encode(self, x, *args, **kwargs):
return x
def decode(self, x, *args, **kwargs):
return x
def quantize(self, x, *args, **kwargs):
if self.vq_interface:
return x, None, [None, None, None]
| return x |
====REPOSITORY====
# Repo Name: skhu101/GauHuman
# Path: smplx/lbs.py
def lbs(
betas: Tensor,
pose: Tensor,
v_template: Tensor,
shapedirs: Tensor,
posedirs: Tensor,
J_regressor: Tensor,
parents: Tensor,
lbs_weights: Tensor,
pose2rot: bool = True,
) -> Tuple[Tensor, Tensor]:
''' Performs Linear Blend Skinning with the given shape and pose parameters
Parameters
----------
betas : torch.tensor BxNB
The tensor of shape parameters
pose : torch.tensor Bx(J + 1) * 3
The pose parameters in axis-angle format
v_template torch.tensor BxVx3
The template mesh that will be deformed
shapedirs : torch.tensor 1xNB
The tensor of PCA shape displacements
posedirs : torch.tensor Px(V * 3)
The pose PCA coefficients
J_regressor : torch.tensor JxV
The regressor array that is used to calculate the joints from
the position of the vertices
parents: torch.tensor J
The array that describes the kinematic tree for the model
lbs_weights: torch.tensor N x V x (J + 1)
The linear blend skinning weights that represent how much the
rotation matrix of each part affects each vertex
pose2rot: bool, optional
Flag on whether to convert the input pose tensor to rotation
matrices. The default value is True. If False, then the pose tensor
should already contain rotation matrices and have a size of
Bx(J + 1)x9
dtype: torch.dtype, optional
Returns
-------
verts: torch.tensor BxVx3
The vertices of the mesh after applying the shape and pose
displacements.
joints: torch.tensor BxJx3
The joints of the model
'''
batch_size = max(betas.shape[0], pose.shape[0])
device, dtype = betas.device, betas.dtype
# Add shape contribution
v_shaped = v_template + blend_shapes(betas, shapedirs)
# Get the joints
# NxJx3 array
J = vertices2joints(J_regressor, v_shaped)
# 3. Add pose blend shapes
# N x J x 3 x 3
ident = torch.eye(3, dtype=dtype, device=device)
if pose2rot:
rot_mats = batch_rodrigues(pose.view(-1, 3)).view(
[batch_size, -1, 3, 3])
pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
# (N x P) x (P, V * 3) -> N x V x 3
pose_offsets = torch.matmul(
pose_feature, posedirs).view(batch_size, -1, 3)
else:
pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
rot_mats = pose.view(batch_size, -1, 3, 3)
pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
posedirs).view(batch_size, -1, 3)
v_posed = pose_offsets + v_shaped
# 4. Get the global joint location
J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
# 5. Do skinning:
# W is N x V x (J + 1)
W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
# (N x V x (J + 1)) x (N x (J + 1) x 16)
num_joints = J_regressor.shape[0]
T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
.view(batch_size, -1, 4, 4)
homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
dtype=dtype, device=device)
v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
verts = v_homo[:, :, :3, 0]
return verts, J_transformed, A, T
# Path: smplx/lbs.py
def vertices2landmarks(
vertices: Tensor,
faces: Tensor,
lmk_faces_idx: Tensor,
lmk_bary_coords: Tensor
) -> Tensor:
''' Calculates landmarks by barycentric interpolation
Parameters
----------
vertices: torch.tensor BxVx3, dtype = torch.float32
The tensor of input vertices
faces: torch.tensor Fx3, dtype = torch.long
The faces of the mesh
lmk_faces_idx: torch.tensor L, dtype = torch.long
The tensor with the indices of the faces used to calculate the
landmarks.
lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32
The tensor of barycentric coordinates that are used to interpolate
the landmarks
Returns
-------
landmarks: torch.tensor BxLx3, dtype = torch.float32
The coordinates of the landmarks for each mesh in the batch
'''
# Extract the indices of the vertices for each face
# BxLx3
batch_size, num_verts = vertices.shape[:2]
device = vertices.device
lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1).to(torch.long)).view(
batch_size, -1, 3)
#The '.to(torch.long)'.
# added to make the trace work in c++,
# otherwise you get a runtime error in c++:
# 'index_select(): Expected dtype int32 or int64 for index'
lmk_faces += torch.arange(
batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(
batch_size, -1, 3, 3)
landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
return landmarks
# Path: smplx/lbs.py
def find_dynamic_lmk_idx_and_bcoords(
vertices: Tensor,
pose: Tensor,
dynamic_lmk_faces_idx: Tensor,
dynamic_lmk_b_coords: Tensor,
neck_kin_chain: List[int],
pose2rot: bool = True,
) -> Tuple[Tensor, Tensor]:
''' Compute the faces, barycentric coordinates for the dynamic landmarks
To do so, we first compute the rotation of the neck around the y-axis
and then use a pre-computed look-up table to find the faces and the
barycentric coordinates that will be used.
Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
for providing the original TensorFlow implementation and for the LUT.
Parameters
----------
vertices: torch.tensor BxVx3, dtype = torch.float32
The tensor of input vertices
pose: torch.tensor Bx(Jx3), dtype = torch.float32
The current pose of the body model
dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
The look-up table from neck rotation to faces
dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
The look-up table from neck rotation to barycentric coordinates
neck_kin_chain: list
A python list that contains the indices of the joints that form the
kinematic chain of the neck.
dtype: torch.dtype, optional
Returns
-------
dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
A tensor of size BxL that contains the indices of the faces that
will be used to compute the current dynamic landmarks.
dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
A tensor of size BxL that contains the indices of the faces that
will be used to compute the current dynamic landmarks.
'''
dtype = vertices.dtype
batch_size = vertices.shape[0]
if pose2rot:
aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
neck_kin_chain)
rot_mats = batch_rodrigues(
aa_pose.view(-1, 3)).view(batch_size, -1, 3, 3)
else:
rot_mats = torch.index_select(
pose.view(batch_size, -1, 3, 3), 1, neck_kin_chain)
rel_rot_mat = torch.eye(
3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0).repeat(
batch_size, 1, 1)
for idx in range(len(neck_kin_chain)):
rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
y_rot_angle = torch.round(
torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
max=39)).to(dtype=torch.long)
neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
mask = y_rot_angle.lt(-39).to(dtype=torch.long)
neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
y_rot_angle = (neg_mask * neg_vals +
(1 - neg_mask) * y_rot_angle)
dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
0, y_rot_angle)
dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
0, y_rot_angle)
return dyn_lmk_faces_idx, dyn_lmk_b_coords
# Path: smplx/lbs.py
def blend_shapes(betas: Tensor, shape_disps: Tensor) -> Tensor:
''' Calculates the per vertex displacement due to the blend shapes
Parameters
----------
betas : torch.tensor Bx(num_betas)
Blend shape coefficients
shape_disps: torch.tensor Vx3x(num_betas)
Blend shapes
Returns
-------
torch.tensor BxVx3
The per-vertex displacement due to shape deformation
'''
# Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
# i.e. Multiply each shape displacement by its corresponding beta and
# then sum them.
blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
return blend_shape
# Path: smplx/vertex_ids.py
# Path: smplx/utils.py
class ModelOutput:
class SMPLOutput(ModelOutput):
class SMPLHOutput(SMPLOutput):
class SMPLXOutput(SMPLHOutput):
class MANOOutput(ModelOutput):
class FLAMEOutput(ModelOutput):
class Struct(object):
def __getitem__(self, key):
def get(self, key, default=None):
def __iter__(self):
def keys(self):
def values(self):
def items(self):
def find_joint_kin_chain(joint_id, kinematic_tree):
def to_tensor(
array: Union[Array, Tensor], dtype=torch.float32
) -> Tensor:
def __init__(self, **kwargs):
def to_np(array, dtype=np.float32):
def rot_mat_to_euler(rot_mats):
# Path: smplx/vertex_joint_selector.py
class VertexJointSelector(nn.Module):
def __init__(self, vertex_ids=None,
use_hands=True,
use_feet_keypoints=True, **kwargs):
super(VertexJointSelector, self).__init__()
extra_joints_idxs = []
face_keyp_idxs = np.array([
vertex_ids['nose'],
vertex_ids['reye'],
vertex_ids['leye'],
vertex_ids['rear'],
vertex_ids['lear']], dtype=np.int64)
extra_joints_idxs = np.concatenate([extra_joints_idxs,
face_keyp_idxs])
if use_feet_keypoints:
feet_keyp_idxs = np.array([vertex_ids['LBigToe'],
vertex_ids['LSmallToe'],
vertex_ids['LHeel'],
vertex_ids['RBigToe'],
vertex_ids['RSmallToe'],
vertex_ids['RHeel']], dtype=np.int32)
extra_joints_idxs = np.concatenate(
[extra_joints_idxs, feet_keyp_idxs])
if use_hands:
self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky']
tips_idxs = []
for hand_id in ['l', 'r']:
for tip_name in self.tip_names:
tips_idxs.append(vertex_ids[hand_id + tip_name])
extra_joints_idxs = np.concatenate(
[extra_joints_idxs, tips_idxs])
self.register_buffer('extra_joints_idxs',
to_tensor(extra_joints_idxs, dtype=torch.long))
def forward(self, vertices, joints):
extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs.to(torch.long)) #The '.to(torch.long)'.
# added to make the trace work in c++,
# otherwise you get a runtime error in c++:
# 'index_select(): Expected dtype int32 or int64 for index'
joints = torch.cat([joints, extra_joints], dim=1)
return joints
# Path: smplx/body_models.py
from typing import Optional, Dict, Union
from .lbs import (
lbs, vertices2landmarks, find_dynamic_lmk_idx_and_bcoords, blend_shapes)
from .vertex_ids import vertex_ids as VERTEX_IDS
from .utils import (
Struct, to_np, to_tensor, Tensor, Array,
SMPLOutput,
SMPLHOutput,
SMPLXOutput,
MANOOutput,
FLAMEOutput,
find_joint_kin_chain)
from .vertex_joint_selector import VertexJointSelector
from collections import namedtuple
import os
import os.path as osp
import pickle
import numpy as np
import torch
import torch.nn as nn
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de
TensorOutput = namedtuple('TensorOutput',
['vertices', 'joints', 'betas', 'expression', 'global_orient', 'body_pose', 'left_hand_pose',
'right_hand_pose', 'jaw_pose', 'v_shaped', 'full_pose', 'A', 'T', 'f'])
class SMPL(nn.Module):
NUM_JOINTS = 23
NUM_BODY_JOINTS = 23
SHAPE_SPACE_DIM = 300
def __init__(
self, model_path: str,
kid_template_path: str = '',
data_struct: Optional[Struct] = None,
create_betas: bool = True,
betas: Optional[Tensor] = None,
num_betas: int = 10,
create_global_orient: bool = True,
global_orient: Optional[Tensor] = None,
create_body_pose: bool = True,
body_pose: Optional[Tensor] = None,
create_transl: bool = True,
transl: Optional[Tensor] = None,
dtype=torch.float32,
batch_size: int = 1,
joint_mapper=None,
gender: str = 'neutral',
| age: str = 'adult', |
====REPOSITORY====
# Repo Name: emdgroup/baybe
# Path: baybe/exceptions.py
class NotEnoughPointsLeftError(Exception):
"""
More recommendations are requested than there are viable parameter configurations
left in the search space.
"""
# Path: baybe/searchspace/core.py
class SearchSpace(SerialMixin):
"""Class for managing the overall search space.
The search space might be purely discrete, purely continuous, or hybrid.
Note that created objects related to the computational representations of parameters
(e.g., parameter bounds, computational dataframes, etc.) may use a different
parameter order than what is specified through the constructor: While the
passed parameter list can contain parameters in arbitrary order, the
aforementioned objects (by convention) list discrete parameters first, followed
by continuous ones.
"""
discrete: SubspaceDiscrete = field(factory=SubspaceDiscrete.empty)
"""The (potentially empty) discrete subspace of the overall search space."""
continuous: SubspaceContinuous = field(factory=SubspaceContinuous.empty)
"""The (potentially empty) continuous subspace of the overall search space."""
def __attrs_post_init__(self):
"""Perform validation and record telemetry values."""
validate_parameters(self.parameters)
validate_constraints(self.constraints, self.parameters)
# Telemetry
telemetry_record_value(TELEM_LABELS["COUNT_SEARCHSPACE_CREATION"], 1)
telemetry_record_value(TELEM_LABELS["NUM_PARAMETERS"], len(self.parameters))
telemetry_record_value(
TELEM_LABELS["NUM_CONSTRAINTS"],
len(self.constraints) if self.constraints else 0,
)
@classmethod
def from_product(
cls,
parameters: List[Parameter],
constraints: Optional[List[Constraint]] = None,
empty_encoding: bool = False,
) -> SearchSpace:
"""Create a search space from a cartesian product.
In the search space, optional subsequent constraints are applied.
That is, the discrete subspace becomes the (filtered) cartesian product
containing all discrete parameter combinations while, analogously, the
continuous subspace represents the (filtered) cartesian product of all
continuous parameters.
Args:
parameters: The parameters spanning the search space.
constraints: An optional set of constraints restricting the valid parameter
space.
empty_encoding: If ``True``, uses an "empty" encoding for all parameters.
This is useful, for instance, in combination with random search
strategies that do not read the actual parameter values, since it avoids
the (potentially costly) transformation of the parameter values to their
computational representation.
Returns:
The constructed search space.
"""
# IMPROVE: The arguments get pre-validated here to avoid the potentially costly
# creation of the subspaces. Perhaps there is an elegant way to bypass the
# default validation in the initializer (which is required for other
# ways of object creation) in this particular case.
validate_parameters(parameters)
if constraints:
validate_constraints(constraints, parameters)
else:
constraints = []
discrete: SubspaceDiscrete = SubspaceDiscrete.from_product(
parameters=[
cast(DiscreteParameter, p) for p in parameters if p.is_discrete
],
constraints=[
cast(DiscreteConstraint, c) for c in constraints if c.is_discrete
],
empty_encoding=empty_encoding,
)
continuous: SubspaceContinuous = SubspaceContinuous(
parameters=[
cast(NumericalContinuousParameter, p)
for p in parameters
if not p.is_discrete
],
constraints_lin_eq=[
cast(ContinuousLinearEqualityConstraint, c)
for c in constraints
if isinstance(c, ContinuousLinearEqualityConstraint)
],
constraints_lin_ineq=[
cast(ContinuousLinearInequalityConstraint, c)
for c in constraints
if isinstance(c, ContinuousLinearInequalityConstraint)
],
)
return SearchSpace(discrete=discrete, continuous=continuous)
@property
def parameters(self) -> List[Parameter]:
"""Return the list of parameters of the search space."""
return self.discrete.parameters + self.continuous.parameters
@property
def constraints(self) -> List[Constraint]:
"""Return the constraints of the search space."""
return (
self.discrete.constraints
+ self.continuous.constraints_lin_eq
+ self.continuous.constraints_lin_ineq
)
@property
def type(self) -> SearchSpaceType:
"""Return the type of the search space."""
if self.discrete.is_empty and not self.continuous.is_empty:
return SearchSpaceType.CONTINUOUS
if not self.discrete.is_empty and self.continuous.is_empty:
return SearchSpaceType.DISCRETE
if not self.discrete.is_empty and not self.continuous.is_empty:
return SearchSpaceType.HYBRID
raise RuntimeError("This line should be impossible to reach.")
@property
def contains_mordred(self) -> bool:
"""Indicates if any of the discrete parameters uses ``MORDRED`` encoding."""
return any(
p.encoding is SubstanceEncoding.MORDRED for p in self.discrete.parameters
)
@property
def contains_rdkit(self) -> bool:
"""Indicates if any of the discrete parameters uses ``RDKIT`` encoding."""
return any(
p.encoding is SubstanceEncoding.RDKIT for p in self.discrete.parameters
)
@property
def param_bounds_comp(self) -> torch.Tensor:
"""Return bounds as tensor."""
return torch.hstack(
[self.discrete.param_bounds_comp, self.continuous.param_bounds_comp]
)
@property
def task_idx(self) -> Optional[int]:
"""The column index of the task parameter in computational representation."""
try:
# TODO [16932]: Redesign metadata handling
task_param = next(
p for p in self.parameters if isinstance(p, TaskParameter)
)
except StopIteration:
return None
# TODO[11611]: The current approach has two limitations:
# 1. It matches by column name and thus assumes that the parameter name
# is used as the column name.
# 2. It relies on the current implementation detail that discrete parameters
# appear first in the computational dataframe.
# --> Fix this when refactoring the data
return self.discrete.comp_rep.columns.get_loc(task_param.name)
@property
def n_tasks(self) -> int:
"""The number of tasks encoded in the search space."""
# TODO [16932]: This approach only works for a single task parameter. For
# multiple task parameters, we need to align what the output should even
# represent (e.g. number of combinatorial task combinations, number of
# tasks per task parameter, etc).
try:
task_param = next(
p for p in self.parameters if isinstance(p, TaskParameter)
)
return len(task_param.values)
# When there are no task parameters, we effectively have a single task
except StopIteration:
return 1
def transform(
self,
data: pd.DataFrame,
) -> pd.DataFrame:
"""Transform data from experimental to computational representation.
This function can e.g. be used to transform data obtained from measurements.
Continuous parameters are not transformed but included.
Args:
data: The data to be transformed. Must contain all specified parameters, can
contain more columns.
Returns:
A dataframe with the parameters in computational representation.
"""
# Transform subspaces separately
df_discrete = self.discrete.transform(data)
df_continuous = self.continuous.transform(data)
# Combine Subspaces
comp_rep = pd.concat([df_discrete, df_continuous], axis=1)
return comp_rep
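# Illustrative sketch (not from the repository): building a purely discrete search space
# via from_product. NumericalDiscreteParameter is assumed to live in baybe.parameters
# with a name/values signature; the point is only to show the classmethod in use.
from baybe.parameters import NumericalDiscreteParameter
params = [NumericalDiscreteParameter(name="temperature", values=(10.0, 20.0, 30.0))]
space = SearchSpace.from_product(parameters=params)
assert space.type is SearchSpaceType.DISCRETE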
# Path: baybe/searchspace/core.py
class SearchSpaceType(Enum):
"""Enum class for different types of search spaces and respective compatibility."""
DISCRETE = "DISCRETE"
"""Flag for discrete search spaces resp. compatibility with discrete search
spaces."""
CONTINUOUS = "CONTINUOUS"
"""Flag for continuous search spaces resp. compatibility with continuous
search spaces."""
EITHER = "EITHER"
"""Flag compatibility with either discrete or continuous, but not hybrid
search spaces."""
HYBRID = "HYBRID"
"""Flag for hybrid search spaces resp. compatibility with hybrid search spaces."""
# Path: baybe/utils/serialization.py
_T = TypeVar("_T")
class SerialMixin:
def to_dict(self) -> dict:
def from_dict(cls: Type[_T], dictionary: dict) -> _T:
def to_json(self) -> str:
def from_json(cls: Type[_T], string: str) -> _T:
def unstructure_base(base: Any, overrides: Optional[dict] = None) -> dict:
def get_base_structure_hook(
base: Type[_T],
overrides: Optional[dict] = None,
) -> Callable[[dict, Type[_T]], _T]:
def structure_base(val: dict, _: Type[_T]) -> _T:
def _structure_dataframe_hook(string: str, _) -> pd.DataFrame:
def _unstructure_dataframe_hook(df: pd.DataFrame) -> str:
def block_serialization_hook(obj: Any) -> None: # noqa: DOC101, DOC103
def block_deserialization_hook(_: Any, cls: type) -> None: # noqa: DOC101, DOC103
# Path: baybe/recommenders/base.py
from abc import ABC, abstractmethod
from typing import Callable, ClassVar, Optional
from attrs import define
from baybe.exceptions import NotEnoughPointsLeftError
from baybe.searchspace import SearchSpace, SearchSpaceType
from baybe.utils.serialization import (
converter,
get_base_structure_hook,
unstructure_base,
)
import pandas as pd
Args:
searchspace: The search space in which experiments are being conducted.
batch_quantity: The number of points that should be recommended.
train_x: The training data used to train the model.
train_y: The training labels used to train the model.
allow_repeated_recommendations: Allow to make recommendations that were
already recommended earlier. This only has an influence in discrete
search spaces.
allow_recommending_already_measured: Allow to output recommendations that
were measured previously. This only has an influence in discrete
search spaces.
Returns:
A DataFrame containing the recommendations as individual rows.
"""
@define
class NonPredictiveRecommender(Recommender, ABC):
"""Abstract base class for recommenders that are non-predictive."""
def recommend( # noqa: D102
self,
searchspace: SearchSpace,
batch_quantity: int = 1,
train_x: Optional[pd.DataFrame] = None,
train_y: Optional[pd.DataFrame] = None,
allow_repeated_recommendations: bool = False,
allow_recommending_already_measured: bool = True,
) -> pd.DataFrame:
# See base class.
if searchspace.type == SearchSpaceType.DISCRETE:
return _select_candidates_and_recommend(
searchspace,
self._recommend_discrete,
batch_quantity,
allow_repeated_recommendations,
allow_recommending_already_measured,
)
if searchspace.type == SearchSpaceType.CONTINUOUS:
return self._recommend_continuous(
searchspace=searchspace, batch_quantity=batch_quantity
)
return self._recommend_hybrid(
searchspace=searchspace, batch_quantity=batch_quantity
)
def _recommend_discrete(
self,
searchspace: SearchSpace,
candidates_comp: pd.DataFrame,
batch_quantity: int,
) -> pd.Index:
"""Calculate recommendations in a discrete search space.
Args:
searchspace: The discrete search space in which the recommendations should
be made.
candidates_comp: The computational representation of all possible candidates
batch_quantity: The size of the calculated batch.
Raises:
NotImplementedError: If the function is not implemented by the child class.
Returns:
The indices of the recommended points with respect to the
computational representation.
"""
try:
return self._recommend_hybrid(
searchspace=searchspace,
batch_quantity=batch_quantity,
candidates_comp=candidates_comp,
).index
except NotImplementedError as exc:
raise NotImplementedError(
"""Hybrid recommender could not be used as fallback when trying to
optimize a discrete space. This is probably due to your search space and
recommender not being compatible. Please verify that your search space
is purely discrete and that you are either using a discrete or hybrid
recommender."""
) from exc
def _recommend_continuous(
self, searchspace: SearchSpace, batch_quantity: int
) -> pd.DataFrame:
"""Calculate recommendations in a continuous search space.
Args:
searchspace: The continuous search space in which the recommendations should
be made.
batch_quantity: The size of the calculated batch.
Raises:
NotImplementedError: If the function is not implemented by the child class.
Returns:
The recommended points.
"""
# If this method is not implemented by a child class, try to call
# _recommend_hybrid instead.
try:
return self._recommend_hybrid(
searchspace=searchspace, batch_quantity=batch_quantity
)
except NotImplementedError as exc:
raise NotImplementedError(
"""Hybrid recommender could not be used as fallback when trying to
optimize a continuous space. This is probably due to your search space
and recommender not being compatible. Please verify that your
search space is purely continuous and that you are either using a
continuous or hybrid recommender."""
) from exc
def _recommend_hybrid(
self,
searchspace: SearchSpace,
batch_quantity: int,
candidates_comp: Optional[pd.DataFrame] = None,
| ) -> pd.DataFrame: |
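Note: the line marked with "|" above simply closes the _recommend_hybrid signature with the same pd.DataFrame return annotation used by _recommend_continuous. As a side illustration of the fallback pattern the base class relies on (discrete and continuous requests delegating to a hybrid implementation inside try/except), here is a minimal self-contained sketch; the HybridOnly class below is hypothetical and not part of baybe.

# Minimal sketch of the try/except fallback pattern used by
# NonPredictiveRecommender above; HybridOnly is a hypothetical stand-in.
class HybridOnly:
    def _recommend_hybrid(self, batch_quantity):
        # the only method a concrete subclass actually implements
        return [f"point_{i}" for i in range(batch_quantity)]

    def _recommend_continuous(self, batch_quantity):
        try:
            # fall back to the hybrid path, mirroring the base class
            return self._recommend_hybrid(batch_quantity=batch_quantity)
        except NotImplementedError as exc:
            raise NotImplementedError("no hybrid fallback available") from exc

print(HybridOnly()._recommend_continuous(2))  # ['point_0', 'point_1']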
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: NJU-3DV/Relightable3DGaussian
# Path: scene/colmap_loader.py
def read_extrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
cameras = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
camera_id = int(elems[0])
model = elems[1]
assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE"
width = int(elems[2])
height = int(elems[3])
params = np.array(tuple(map(float, elems[4:])))
cameras[camera_id] = Camera(id=camera_id, model=model,
width=width, height=height,
params=params)
return cameras
# Path: scene/colmap_loader.py
def qvec2rotmat(qvec):
return np.array([
[1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
[2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2]])
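A quick sanity check for qvec2rotmat (a sketch, assuming the function above is in scope): COLMAP stores quaternions in (w, x, y, z) order, so the identity quaternion must yield the identity matrix, and a quarter turn about z should map the x-axis onto the y-axis.

import numpy as np

# Sanity check for qvec2rotmat; assumes the function defined above is importable.
identity_q = np.array([1.0, 0.0, 0.0, 0.0])        # (w, x, y, z)
assert np.allclose(qvec2rotmat(identity_q), np.eye(3))

z90_q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
print(np.round(qvec2rotmat(z90_q), 6))  # approx. [[0, -1, 0], [1, 0, 0], [0, 0, 1]]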
# Path: scene/colmap_loader.py
def read_extrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24 * num_points2D,
format_char_sequence="ddq" * num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::WriteCamerasBinary(const std::string& path)
void Reconstruction::ReadCamerasBinary(const std::string& path)
"""
cameras = {}
with open(path_to_model_file, "rb") as fid:
num_cameras = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_cameras):
camera_properties = read_next_bytes(
fid, num_bytes=24, format_char_sequence="iiQQ")
camera_id = camera_properties[0]
model_id = camera_properties[1]
model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
width = camera_properties[2]
height = camera_properties[3]
num_params = CAMERA_MODEL_IDS[model_id].num_params
params = read_next_bytes(fid, num_bytes=8 * num_params,
format_char_sequence="d" * num_params)
cameras[camera_id] = Camera(id=camera_id,
model=model_name,
width=width,
height=height,
params=np.array(params))
assert len(cameras) == num_cameras
return cameras
# Path: scene/colmap_loader.py
def read_points3D_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
void Reconstruction::WritePoints3DBinary(const std::string& path)
"""
with open(path_to_model_file, "rb") as fid:
num_points = read_next_bytes(fid, 8, "Q")[0]
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
for p_id in range(num_points):
binary_point_line_properties = read_next_bytes(
fid, num_bytes=43, format_char_sequence="QdddBBBd")
xyz = np.array(binary_point_line_properties[1:4])
rgb = np.array(binary_point_line_properties[4:7])
error = np.array(binary_point_line_properties[7])
track_length = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q")[0]
track_elems = read_next_bytes(
fid, num_bytes=8 * track_length,
format_char_sequence="ii" * track_length)
xyzs[p_id] = xyz
rgbs[p_id] = rgb
errors[p_id] = error
return xyzs, rgbs, errors
# Path: scene/colmap_loader.py
def read_points3D_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DText(const std::string& path)
void Reconstruction::WritePoints3DText(const std::string& path)
"""
xyzs = None
rgbs = None
errors = None
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
xyz = np.array(tuple(map(float, elems[1:4])))
rgb = np.array(tuple(map(int, elems[4:7])))
error = np.array(float(elems[7]))
if xyzs is None:
xyzs = xyz[None, ...]
rgbs = rgb[None, ...]
errors = error[None, ...]
else:
xyzs = np.append(xyzs, xyz[None, ...], axis=0)
rgbs = np.append(rgbs, rgb[None, ...], axis=0)
errors = np.append(errors, error[None, ...], axis=0)
return xyzs, rgbs, errors
# Path: utils/graphics_utils.py
def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
Rt = getWorld2View(R, t)
C2W = np.linalg.inv(Rt)
cam_center = C2W[:3, 3]
cam_center = (cam_center + translate) * scale
C2W[:3, 3] = cam_center
Rt = np.linalg.inv(C2W)
return np.float32(Rt)
# Path: utils/graphics_utils.py
def focal2fov(focal, pixels):
return 2 * math.atan(pixels / (2 * focal))
# Path: utils/graphics_utils.py
def fov2focal(fov, pixels):
return pixels / (2 * math.tan(fov / 2))
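focal2fov and fov2focal above are exact inverses for a fixed pixel count; a small round-trip check (sketch only, the numeric values are arbitrary and both helpers above are assumed to be in scope):

import math

# Round-trip check for focal2fov / fov2focal; the numbers are arbitrary.
pixels, focal = 800, 1111.0
fov = focal2fov(focal, pixels)                 # 2 * atan(pixels / (2 * focal))
assert abs(fov2focal(fov, pixels) - focal) < 1e-6
print(round(math.degrees(fov), 1))             # about 39.6 degrees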
# Path: utils/sh_utils.py
def SH2RGB(sh):
return sh * C0 + 0.5
# Path: scene/gaussian_model.py
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
def __init__(self, sh_degree: int, render_type='render'):
def set_transform(self, rotation=None, center=None, scale=None, offset=None, transform=None):
def capture(self):
def restore(self, model_args, training_args,
is_training=False, restore_optimizer=True):
def get_scaling(self):
def get_rotation(self):
def get_xyz(self):
def get_normal(self):
def get_shs(self):
def get_incidents(self):
def get_visibility(self):
def get_opacity(self):
def get_base_color(self):
def get_roughness(self):
def get_metallic(self):
def get_brdf(self):
def get_by_names(self, names):
def split_by_names(self, features, names):
def get_covariance(self, scaling_modifier=1):
def get_inverse_covariance(self, scaling_modifier=1):
def oneupSHdegree(self):
def attribute_names(self):
def finetune_visibility(self, iterations=1000):
def create_from_gaussians(cls, gaussians_list, dataset):
def create_from_ckpt(self, checkpoint_path, restore_optimizer=False):
def create_from_pcd(self, pcd: BasicPointCloud, spatial_lr_scale: float):
def training_setup(self, training_args: OptimizationParams):
def step(self):
def update_learning_rate(self, iteration):
def construct_list_of_attributes(self):
def save_ply(self, path):
def reset_opacity(self):
def load_ply(self, path):
def replace_tensor_to_optimizer(self, tensor, name):
def _prune_optimizer(self, mask):
def prune_points(self, mask):
def cat_tensors_to_optimizer(self, tensors_dict):
def densification_postfix(self, new_xyz, new_normal, new_shs_dc, new_shs_rest, new_opacities, new_scaling,
new_rotation, new_base_color=None, new_roughness=None,
new_metallic=None, new_incidents_dc=None, new_incidents_rest=None,
new_visibility_dc=None, new_visibility_rest=None):
def densify_and_split(self, grads, grad_threshold, scene_extent, grads_normal, grad_normal_threshold, N=2):
def densify_and_clone(self, grads, grad_threshold, scene_extent, grads_normal, grad_normal_threshold):
def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size, max_grad_normal):
def prune(self, min_opacity, extent, max_screen_size):
def add_densification_stats(self, viewspace_point_tensor, update_filter):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
# Path: scene/dataset_readers.py
import re
import os
import sys
import glob
import json
import numpy as np
import imageio.v2 as imageio
import pyexr
import pdb
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from tqdm import tqdm
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png", debug=False):
cam_infos = []
read_mvs = False
mvs_dir = f"{path}/extra"
if os.path.exists(mvs_dir) and "train" not in transformsfile:
print("Loading mvs as geometry constraint.")
read_mvs = True
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
for idx, frame in enumerate(tqdm(frames, leave=False)):
image_path = os.path.join(path, frame["file_path"] + extension)
image_name = Path(image_path).stem
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3, :3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image, is_hdr = load_img(image_path)
bg = np.array([1, 1, 1]) if white_background else np.array([0, 0, 0])
image_mask = np.ones_like(image[..., 0])
if image.shape[-1] == 4:
image_mask = image[:, :, 3]
image = image[:, :, :3] * image[:, :, 3:4] + bg * (1 - image[:, :, 3:4])
# read depth and mask
depth = None
normal = None
if read_mvs:
depth_path = os.path.join(mvs_dir + "/depths/", os.path.basename(frame["file_path"]) + ".tiff")
normal_path = os.path.join(mvs_dir + "/normals/", os.path.basename(frame["file_path"]) + ".pfm")
depth = load_depth(depth_path)
normal = load_pfm(normal_path)
depth = depth * image_mask
normal = normal * image_mask[..., np.newaxis]
fovy = focal2fov(fov2focal(fovx, image.shape[0]), image.shape[1])
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=fovy, FovX=fovx, image=image, image_mask=image_mask,
image_path=image_path, depth=depth, normal=normal, image_name=image_name,
width=image.shape[1], height=image.shape[0], hdr=is_hdr))
if debug and idx >= 5:
break
return cam_infos
def readNerfSyntheticInfo(path, white_background, eval, extension=".png", debug=False):
print("Reading Training Transforms")
train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension, debug=debug)
if eval:
print("Reading Test Transforms")
test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension,
debug=debug)
else:
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "points3d.ply")
if not os.path.exists(ply_path):
# Since this data set has no colmap data, we start with random points
num_pts = 100_000
print(f"Generating random point cloud ({num_pts})...")
# We create random points inside the bounds of the synthetic Blender scenes
xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
shs = np.random.random((num_pts, 3)) / 255.0
normals = np.random.randn(*xyz.shape)
normals /= np.linalg.norm(normals, axis=-1, keepdims=True)
storePly(ply_path, xyz, SH2RGB(shs) * 255, normals)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def loadCamsFromScene(path, valid_list, white_background, debug):
with open(f'{path}/sfm_scene.json') as f:
sfm_scene = json.load(f)
# load bbox transform
bbox_transform = np.array(sfm_scene['bbox']['transform']).reshape(4, 4)
bbox_transform = bbox_transform.copy()
bbox_transform[[0, 1, 2], [0, 1, 2]] = bbox_transform[[0, 1, 2], [0, 1, 2]].max() / 2
bbox_inv = np.linalg.inv(bbox_transform)
# meta info
image_list = sfm_scene['image_path']['file_paths']
# camera parameters
| train_cam_infos = [] |
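Note: the line marked with "|" just initializes the list of camera infos, as in the loaders above. For context, the axis handling used in readCamerasFromTransforms earlier in this file (flip the Y/Z columns of the NeRF camera-to-world matrix, invert to world-to-camera, transpose R for the glm-based CUDA code) can be checked with a tiny sketch; the translation value below is made up.

import numpy as np

# Sketch of the camera-convention steps used in readCamerasFromTransforms:
# OpenGL/Blender axes -> COLMAP axes, then camera-to-world -> world-to-camera.
c2w = np.eye(4)
c2w[:3, 3] = [0.0, 0.0, 4.0]        # hypothetical camera 4 units along +z
c2w[:3, 1:3] *= -1                   # flip Y and Z columns (Blender -> COLMAP)
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3, :3])        # stored transposed for the glm/CUDA side
T = w2c[:3, 3]
print(R.shape, np.round(T, 6))       # (3, 3) and T close to [0, 0, 4]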
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: UX-Decoder/LLaVA-Grounding
# Path: llava/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
TWO = auto()
MPT = auto()
PLAIN = auto()
LLAMA_2 = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_images(self, return_pil=False):
def expand2square(pil_img, background_color=(122, 116, 104)):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: llava/mm_utils.py
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
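The core trick in tokenizer_image_token is insert_separator, which interleaves the tokenized text chunks with the image-token id and drops the trailing separator. A standalone sketch with made-up token ids; the separator value below is a placeholder, the real one comes from llava.constants.

# Standalone sketch of the chunk/separator interleaving in tokenizer_image_token.
def insert_separator(X, sep):
    return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]

chunks = [[1, 2, 3], [4, 5], [6]]     # ids of the text pieces split on '<image>'
IMAGE_TOKEN_INDEX = -200              # placeholder; the real value lives in llava.constants
print(insert_separator(chunks, [IMAGE_TOKEN_INDEX]))
# [[1, 2, 3], [-200], [4, 5], [-200], [6]]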
# Path: llava/constants.py
IGNORE_INDEX = -100
# Path: llava/constants.py
DEFAULT_IMAGE_TOKEN = "<image>"
# Path: llava/eval/llava_mapper.py
class COCOInstanceNewBaselineDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by MaskFormer.
This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Find and applies suitable cropping to the image and annotation
4. Prepare image and annotation to Tensors
"""
@configurable
def __init__(
self,
is_train=True,
*,
tfm_gens,
image_format,
tokenizer,
image_processor,
preprocess,
):
"""
NOTE: this interface is experimental.
Args:
is_train: for training or inference
augmentations: a list of augmentations or deterministic transforms to apply
tfm_gens: data augmentation
image_format: an image format supported by :func:`detection_utils.read_image`.
"""
self.tfm_gens = tfm_gens
logging.getLogger(__name__).info(
"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}".format(str(self.tfm_gens))
)
self.img_format = image_format
self.is_train = is_train
self.tokenizer = tokenizer
self.processor = image_processor
self.preprocess = preprocess
@classmethod
def from_config(cls, cfg, is_train=True,tokenizer=None,image_processor=None,preprocess=None):
# Build augmentation
tfm_gens = build_transform_gen(cfg, is_train)
ret = {
"is_train": is_train,
"tfm_gens": tfm_gens,
"image_format": cfg['INPUT']['FORMAT'],
"tokenizer": tokenizer,
"image_processor": image_processor,
"preprocess": preprocess,
}
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
#########llava image processing
image_clip = self.processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
dataset_dict["image_clip"] = image_clip
##################
# TODO: get padding mask
# by feeding a "segmentation mask" to the same transforms
padding_mask = np.ones(image.shape[:2])
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
dataset_dict["image_ori"]=image
# the crop transformation has default padding value 0 for segmentation
padding_mask = transforms.apply_segmentation(padding_mask)
padding_mask = ~ padding_mask.astype(bool)
image_shape = image.shape[:2] # h, w
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
dataset_dict["padding_mask"] = torch.as_tensor(np.ascontiguousarray(padding_mask))
num_conversations = len(dataset_dict['conversations'])
rd = np.random.choice(num_conversations)
# selected_conversation, grounding_list = dataset_dict['conversations'][rd]
# dataset_dict['conversation'] = [selected_conversation]
selected_conversation = [aa[0] for aa in dataset_dict['conversations']]
dataset_dict['conversation'] = selected_conversation
sources = preprocess_multimodal(
copy.deepcopy(dataset_dict['conversation']),
True) #! Debug here
# sources = copy.deepcopy(dataset_dict['conversation'])
data_dict_conversation = self.preprocess(
sources,
self.tokenizer,
has_image=True)
data_dict_conversation = dict(input_ids=data_dict_conversation["input_ids"][0],
labels=data_dict_conversation["labels"][0])
dataset_dict.update(data_dict_conversation)
dataset_dict['tokenizer'] = self.tokenizer
num_segs = 1 # sum([conv['value'].count('<seg>') for conv in selected_conversation])
# grounding_list=
if "grounding_info" in dataset_dict and len(dataset_dict['grounding_info'])>0:
anno_id2id=dict()
for id,obj in enumerate(dataset_dict['grounding_info']):
obj["bbox_mode"] = BoxMode.XYWH_ABS
anno_id2id[obj['id']]=id
id2class=[[] for _ in range(len(dataset_dict['grounding_info']))]
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in dataset_dict["grounding_info"]
]
# assert "segmentation" in annos[0]
instances = utils.annotations_to_instances(annos, image_shape,mask_format="bitmask")
h, w = instances.image_size
# image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)
if hasattr(instances, 'gt_masks'):
gt_masks = instances.gt_masks
# gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)
instances.gt_masks = gt_masks.tensor
if grounding_list is None:
dataset_dict['grounding']=False
grounding_mask=[False for _ in range(num_segs)]
dataset_dict['grounding_mask']=grounding_mask
else:
grounding_mask=[True if g is not None else False for g in grounding_list]
dataset_dict['grounding_mask']=grounding_mask
new_grounding_list=[g for g in grounding_list if g is not None]
if sum(grounding_mask)==0:
dataset_dict['grounding']=False
else:
dataset_dict['grounding']=True
if dataset_dict['grounding']:
# assert num_segs == len(grounding_list)
for grounding_id,grounding in enumerate(new_grounding_list):
if grounding is not None:
for annid in grounding:
id2class[anno_id2id[annid]].append(grounding_id)
instances.gt_classes=id2class
dataset_dict["instances"] = instances
else:
dataset_dict['grounding'] = False
grounding_mask = [False for _ in range(num_segs)]
dataset_dict['grounding_mask'] = grounding_mask
return [dataset_dict]
# Path: llava/eval/LLaVA_G_Eval.py
import os
import cv2
import json
import torch
import collections
import transformers
import numpy as np
import jsonlines
import jsonlines
import os
import shutil
import os
import shutil
import re
import re
import cv2
import argparse
from llava.model import *
from typing import Dict
from llava import conversation as conversation_lib
from tqdm import tqdm
from detectron2.utils.file_io import PathManager
from llava.mm_utils import tokenizer_image_token
from llava.constants import IGNORE_INDEX, DEFAULT_IMAGE_TOKEN
from llava.eval.llava_mapper import COCOInstanceNewBaselineDatasetMapper as LLAVAInstanceNewBaselineDatasetMapper
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
from llava.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from huggingface_hub import hf_hub_download
from peft import PeftModel
from peft import PeftModel
from detectron2.config import LazyConfig
from llava.model.openseed import build_model
from llava.model.openseed.BaseModel import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
from llava.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from huggingface_hub import hf_hub_download
from peft import PeftModel
from peft import PeftModel
from detectron2.config import LazyConfig
from llava.model.openseed import build_model
from llava.model.openseed.BaseModel import BaseModel
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
print('Loading LLaVA from base model...')
model = LlavaLlamaForCausalLM_gd.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
if model.lm_head.weight.shape[0] != token_num:
model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
print('Loading additional LLaVA weights...')
if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
else:
# this is probably from HF Hub
def load_from_hf(repo_id, filename, subfolder=None):
cache_file = hf_hub_download(
repo_id=repo_id,
filename=filename,
subfolder=subfolder)
return torch.load(cache_file, map_location='cpu')
non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
if any(k.startswith('model.model.') for k in non_lora_trainables):
non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
model.load_state_dict(non_lora_trainables, strict=False)
print('Loading LoRA weights...')
model = PeftModel.from_pretrained(model, model_path)
print('Merging LoRA weights...')
model = model.merge_and_unload()
print('Model is loaded...')
elif model_base is not None:
# this may be mm projector only
print('Loading LLaVA from base model...')
if 'mpt' in model_name.lower():
if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):
shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
model = LlavaLlamaForCausalLM_gd.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
cfg_pretrained = AutoConfig.from_pretrained(model_path)
model = LlavaLlamaForCausalLM_gd.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
model.load_state_dict(mm_projector_weights, strict=False)
else:
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = LlavaLlamaForCausalLM_gd.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = LlavaLlamaForCausalLM_gd.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
else:
# Load language model
if model_base is not None:
# PEFT model
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
print(f"Loading LoRA weights from {model_path}")
model = PeftModel.from_pretrained(model, model_path)
print(f"Merging weights")
model = model.merge_and_unload()
print('Convert to FP16...')
model.to(torch.float16)
else:
use_fast = False
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
image_processor = None
if 'llava' in model_name.lower():
mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
if mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
if mm_use_im_start_end:
tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
model.resize_token_embeddings(len(tokenizer))
vision_tower = model.get_vision_tower()
if not vision_tower.is_loaded:
vision_tower.load_model()
vision_tower.to(device='cuda', dtype=torch.float16)
image_processor = vision_tower.image_processor
if hasattr(model.config, "max_sequence_length"):
context_len = model.config.max_sequence_length
else:
context_len = 2048
return tokenizer, model, image_processor, context_len
def construct_vision_model(self, path_vision_model_cfg):
def get_config_from_name(cfg, dataset_name="flickr"):
# adjust config according to dataset, flickr by default
if 'sam' in dataset_name:
cfg.update(cfg['SAM'])
return cfg
elif 'flickr' in dataset_name:
cfg.update(cfg['flickr'])
return cfg
elif 'coco_instruct_train' in dataset_name:
cfg.update(cfg['coco_instruct'])
return cfg
elif 'lisa' in dataset_name:
cfg.update(cfg['LISA_REF'])
return cfg
elif 'llava' in dataset_name:
cfg.update(cfg['llava'])
return cfg
elif 'vg' in dataset_name:
cfg.update(cfg['vg'])
return cfg
| elif 'part' in dataset_name and 'pascal_part' not in dataset_name and 'partimagenet' not in dataset_name: |
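The branch marked with "|" continues the substring dispatch of get_config_from_name, and its extra "not in" guards exist because 'pascal_part' and 'partimagenet' also contain 'part'. A standalone sketch of that pattern follows; the dataset names and config sections are hypothetical.

# Hypothetical sketch of substring-based config dispatch: more specific
# names must be matched (or excluded) before more generic ones.
def select_section(cfg, dataset_name):
    if "pascal_part" in dataset_name:
        section = "pascal_part"
    elif "part" in dataset_name:          # safe only after the check above
        section = "part"
    else:
        section = "default"
    merged = dict(cfg)
    merged.update(cfg[section])
    return merged

cfg = {"lr": 1e-4, "pascal_part": {"batch": 4}, "part": {"batch": 8}, "default": {"batch": 16}}
print(select_section(cfg, "pascal_part_val")["batch"])   # 4, not 8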
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: daveredrum/SceneTex
# Path: lib/camera_helper.py
def init_trajectory(dist_list, elev_list, azim_list, at):
Rs, Ts = [], []
for dist, elev, azim in zip(dist_list, elev_list, azim_list):
R, T = look_at_view_transform(dist, elev, azim, at=at)
Rs.append(R) # 1, 3, 3
Ts.append(T) # 1, 3
return Rs, Ts
# Path: lib/camera_helper.py
def init_blenderproc_trajectory(trajectory, device):
"""
This function only applies for Blenderproc cameras and original mesh data
"""
Rs, Ts = [], []
for _, viewpoint in trajectory.items():
c2w = torch.FloatTensor(viewpoint["matrix"]).to(device)
calibrate_axis = torch.FloatTensor([
[-1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
]).to(device)
rot_z = torch.FloatTensor([
[np.cos(np.pi), -np.sin(np.pi), 0],
[np.sin(np.pi), np.cos(np.pi), 0],
[0, 0, 1]
]).to(device)
rot_x = torch.FloatTensor([
[1, 0, 0, 0],
[0, np.cos(np.pi/2), -np.sin(np.pi/2), 0],
[0, np.sin(np.pi/2), np.cos(np.pi/2), 0],
[0, 0, 0, 1]
]).to(device)
c2w = calibrate_axis @ c2w
c2w = rot_x @ c2w
t = c2w[:3,-1] # Extract translation of the camera
r = c2w[:3, :3] @ rot_z # Extract rotation matrix of the camera
# horizontally flip the image
flip_x = torch.FloatTensor([
[-1, 0, 0],
[0, 1, 0],
[0, 0, 1]
]).to(device)
r = r @ flip_x
t = t @ r # Make rotation local
Rs.append(r.unsqueeze(0))
Ts.append(t.unsqueeze(0))
return Rs, Ts
# Path: lib/camera_helper.py
def init_camera_R_T(R, T, image_size, device, fov=60):
"""init camera using R and T matrics
Args:
R (torch.FloatTensor): Rotation matrix, (N, 3, 3)
T (torch.FloatTensor): Translation matrix, (N, 3)
image_size (int): rendering size
device (torch.device): CPU or GPU
Returns:
camera: PyTorch3D camera instance
"""
if isinstance(image_size, int):
image_size = torch.tensor([image_size, image_size]).unsqueeze(0)
elif isinstance(image_size, tuple):
image_size = torch.tensor(image_size).unsqueeze(0)
else:
raise TypeError("invalid image size.")
# cameras = PerspectiveCameras(R=R, T=T, device=device, image_size=image_size)
cameras = FoVPerspectiveCameras(R=R, T=T, device=device, fov=fov)
return cameras
# Path: lib/render_helper.py
def init_renderer(camera, shader, image_size, faces_per_pixel):
raster_settings = RasterizationSettings(image_size=image_size, faces_per_pixel=faces_per_pixel)
renderer = MeshRendererWithFragments(
rasterizer=MeshRasterizer(
cameras=camera,
raster_settings=raster_settings
),
shader=shader
)
return renderer
# Path: lib/shading_helper.py
def init_flat_texel_shader(camera, device, blend_params=BlendParams()):
shader=FlatTexelShader(
cameras=camera,
device=device,
blend_params=blend_params
)
return shader
# Path: lib/projection_helper.py
@torch.no_grad()
def get_visible_pixel_uvs(mesh, renderer, faces_verts_uvs):
fragments = renderer.rasterizer(mesh)
pixel_uvs = interpolate_face_attributes(
fragments.pix_to_face, fragments.bary_coords, faces_verts_uvs
) # NxHsxWsxKx2
return pixel_uvs
# Path: lib/projection_helper.py
def get_all_4_locations(values_y, values_x):
y_0 = torch.floor(values_y)
y_1 = torch.ceil(values_y)
x_0 = torch.floor(values_x)
x_1 = torch.ceil(values_x)
return torch.cat([y_0, y_0, y_1, y_1], 0).long(), torch.cat([x_0, x_1, x_0, x_1], 0).long()
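get_all_4_locations returns the four integer neighbours (all floor/ceil combinations) of fractional pixel coordinates, which is what the texture back-projection needs. A tiny check (sketch, assuming the helper above is importable):

import torch

# The four texel neighbours of a single fractional (y, x) location,
# paired in the order (y0,x0), (y0,x1), (y1,x0), (y1,x1).
ys, xs = get_all_4_locations(torch.tensor([2.3]), torch.tensor([5.7]))
print(ys.tolist(), xs.tolist())   # [2, 2, 3, 3] [5, 6, 5, 6]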
# Path: models/modules/modules.py
class MLP(nn.Module):
def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True, dtype=torch.float32):
super().__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.dim_hidden = dim_hidden
self.num_layers = num_layers
net = []
for l in range(num_layers):
net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden,
self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias, dtype=dtype))
self.net = nn.ModuleList(net)
def forward(self, x):
for l in range(self.num_layers):
x = self.net[l](x)
if l != self.num_layers - 1:
x = F.relu(x, inplace=True)
return x
# Path: models/modules/modules.py
class Siren(nn.Module):
def __init__(self, in_features, hidden_features, hidden_layers, out_features, outermost_linear=False,
first_omega_0=30, hidden_omega_0=30.):
super().__init__()
self.net = []
self.net.append(SineLayer(in_features, hidden_features,
is_first=True, omega_0=first_omega_0))
for i in range(hidden_layers):
self.net.append(SineLayer(hidden_features, hidden_features,
is_first=False, omega_0=hidden_omega_0))
if outermost_linear:
final_linear = nn.Linear(hidden_features, out_features)
with torch.no_grad():
final_linear.weight.uniform_(-np.sqrt(6 / hidden_features) / hidden_omega_0,
np.sqrt(6 / hidden_features) / hidden_omega_0)
self.net.append(final_linear)
else:
self.net.append(SineLayer(hidden_features, out_features,
is_first=False, omega_0=hidden_omega_0))
self.net = nn.Sequential(*self.net)
def forward(self, coords):
outputs = self.net(coords)
return outputs
# Path: models/modules/modules.py
class HashGrid(nn.Module):
def __init__(self, in_channels,
otype, n_levels, n_features_per_level, log2_hashmap_size, base_resolution, # the same as in tinycudann
max_resolution, # NOTE need to compute per_level_scale ,
dtype=torch.float32 # half precision might lead to NaN
):
super().__init__()
self.otype = otype
self.n_levels = n_levels
self.n_features_per_level = n_features_per_level
self.log2_hashmap_size = log2_hashmap_size
self.base_resolution = base_resolution
self.max_resolution = max_resolution
self.per_level_scale = self.get_per_level_scale()
self.config = {
"otype": self.otype,
"n_levels": self.n_levels,
"n_features_per_level": self.n_features_per_level,
"log2_hashmap_size": self.log2_hashmap_size,
"base_resolution": self.base_resolution,
"per_level_scale": self.per_level_scale
}
self.hashgrid = tcnn.Encoding(in_channels, self.config, dtype=dtype)
def get_per_level_scale(self):
return np.power(self.max_resolution / self.base_resolution, 1 / self.n_levels)
def forward(self, inputs):
return self.hashgrid(inputs)
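HashGrid derives tinycudann's per_level_scale from the desired base and maximum resolutions: levels grow geometrically, so scale = (max_resolution / base_resolution) ** (1 / n_levels). A numeric sketch with typical values (the numbers are illustrative, not taken from a config in this repository):

import numpy as np

# per_level_scale as computed by HashGrid.get_per_level_scale above.
base_resolution, max_resolution, n_levels = 16, 2048, 16
per_level_scale = np.power(max_resolution / base_resolution, 1 / n_levels)
print(round(float(per_level_scale), 3))                                   # about 1.354
print(round(float(base_resolution * per_level_scale ** n_levels)))        # 2048 == max_resolution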
# Path: models/modules/modules.py
class HashGridMLP(nn.Module):
def __init__(self, in_channels,
hashgrid_config, mlp_config
):
super().__init__()
self.hashgrid_config = {
"otype": hashgrid_config.otype,
"n_levels": hashgrid_config.n_levels,
"n_features_per_level": hashgrid_config.n_features_per_level,
"log2_hashmap_size": hashgrid_config.log2_hashmap_size,
"base_resolution": hashgrid_config.base_resolution,
"per_level_scale": self.get_per_level_scale(
hashgrid_config.max_resolution,
hashgrid_config.base_resolution,
hashgrid_config.n_levels
)
}
self.MLP_config = {
"otype": mlp_config.otype,
"activation": mlp_config.activation,
"output_activation": mlp_config.output_activation,
"n_neurons": mlp_config.n_neurons,
"n_hidden_layers": mlp_config.n_hidden_layers
}
self.net = tcnn.NetworkWithInputEncoding(in_channels, mlp_config.out_channels, self.hashgrid_config, self.MLP_config)
def get_per_level_scale(self, max_resolution, base_resolution, n_levels):
return np.power(max_resolution / base_resolution, 1 / n_levels)
def forward(self, inputs):
return self.net(inputs)
# Path: models/modules/anchors.py
class AnchorTransformer(nn.Module):
def __init__(self,
config,
device,
anchor_dim,
num_instances # this must be specified on init
):
super().__init__()
self.config = config
self.device = device
self.anchor_dim = anchor_dim
self.num_instances = num_instances
self.hidden_size = config.anchor_config.hidden_size
self.num_heads = config.anchor_config.num_heads
self.num_mapping_layers = config.anchor_config.num_mapping_layers
if self.config.anchor_config.anchor_type == "self-attention":
if self.num_mapping_layers == 0 and self.num_heads == 1:
self.hidden_size = anchor_dim
self.map_key = nn.Identity()
self.map_query = nn.Identity()
self.map_value = nn.Identity()
self.attention = nn.MultiheadAttention(
anchor_dim,
1,
batch_first=True # (batch, seq, feature)
)
self.map_outputs = nn.Identity()
else:
self.map_key = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)
self.map_query = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)
self.map_value = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)
self.attention = nn.MultiheadAttention(
self.hidden_size * self.num_heads,
self.num_heads,
batch_first=True # (batch, seq, feature)
)
self.map_outputs = MLP(self.hidden_size * self.num_heads, anchor_dim, self.hidden_size, self.num_mapping_layers)
elif self.config.anchor_config.anchor_type == "cross-attention":
if self.num_mapping_layers == 0 and self.num_heads == 1:
self.hidden_size = anchor_dim
self.map_key = nn.Identity()
self.map_query = nn.Identity()
self.map_value = nn.Identity()
self.attention = nn.MultiheadAttention(
anchor_dim,
1,
batch_first=True # (batch, seq, feature)
)
self.map_outputs = nn.Identity()
else:
self.map_key = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)
self.map_query = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)
self.map_value = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)
self.attention = nn.MultiheadAttention(
self.hidden_size * self.num_heads,
self.num_heads,
batch_first=True # (batch, seq, feature)
)
self.map_outputs = MLP(self.hidden_size * self.num_heads, anchor_dim, self.hidden_size, self.num_mapping_layers)
elif self.config.anchor_config.anchor_type == "flash-attention":
if self.num_mapping_layers == 0 and self.num_heads == 1:
self.hidden_size = anchor_dim
self.map_key = nn.Identity()
self.map_query = nn.Identity()
self.map_value = nn.Identity()
self.map_outputs = nn.Identity()
else:
self.map_key = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)
self.map_query = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)
self.map_value = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)
self.map_outputs = MLP(self.hidden_size * self.num_heads, anchor_dim, self.hidden_size, self.num_mapping_layers)
self.attention = FlashCrossAttention() # NOTE input must be half precision
def _map_inputs(self, query, key, value):
if self.config.anchor_config.output_type == "token":
# inputs = torch.cat([inputs, self.embedding.unsqueeze(1)], dim=1)
avg_token = query.mean(1, keepdim=True)
query = torch.cat([query, avg_token], dim=1)
key = self.map_key(key)
query = self.map_query(query)
value = self.map_value(value)
return query, key, value
def _map_outputs(self, inputs):
outputs = self.map_outputs(inputs) # (M, L, C)
if self.config.anchor_config.output_type == "token":
outputs = outputs[:, -1, :]
elif self.config.anchor_config.output_type == "mean":
outputs = outputs.mean(1)
else:
pass
return outputs
def _map_features(self, features, anchors, instances_in_view, pad_seq=False):
B, H, W, C = features.shape
features = features.reshape(-1, C)
instances_in_view = instances_in_view.reshape(-1)
labels = torch.unique(instances_in_view).long()
# outputs
seq_features, seq_anchors, seq_labels, seqlens, seqlens_k = [], [], [], [0], [0]
cu_seqlens, max_seqlen, cu_seqlens_k, max_seqlen_k = None, None, None, None
map_flag = False
for label in labels:
if label == 0: continue
instance_mask = instances_in_view == label
instance_feature = features[instance_mask]
instance_labels = instances_in_view[instance_mask]
seq_features.append(instance_feature)
seq_anchors.append(anchors[label-1])
seqlen = instance_feature.shape[0]
seqlens.append(seqlen)
seqlens_k.append(anchors.shape[1])
seq_labels.append(instance_labels)
if len(seq_features) > 0:
map_flag = True
if pad_seq:
seq_features = pad_sequence(seq_features, batch_first=True)
seq_labels = pad_sequence(seq_labels, batch_first=True)
seq_anchors = torch.stack(seq_anchors)
else:
seq_features = torch.cat(seq_features, dim=0)
seq_labels = torch.cat(seq_labels, dim=0)
seq_anchors = torch.cat(seq_anchors, dim=0)
cu_seqlens = torch.cumsum(torch.IntTensor(seqlens), dim=0).to(self.device).int()
max_seqlen = max(seqlens)
cu_seqlens_k = torch.cumsum(torch.IntTensor(seqlens_k), dim=0).to(self.device).int()
max_seqlen_k = max(seqlens_k)
return seq_features, seq_labels, seq_anchors, cu_seqlens, max_seqlen, cu_seqlens_k, max_seqlen_k, map_flag
def _unmap_features(self, features, seq_labels, instances_in_view):
*_, C = features.shape
B, H, W = instances_in_view.shape
unmapped = torch.zeros(B, H, W, C).to(self.device)
if self.config.anchor_config.anchor_type == "flash-attention":
unmapped = unmapped.reshape(-1, C)
instances_in_view = instances_in_view.reshape(-1)
assert unmapped.shape[0] == instances_in_view.shape[0]
labels = torch.unique(instances_in_view)
for label in labels:
if label == 0: continue
unmapped[instances_in_view == label] = features[seq_labels == label]
unmapped = unmapped.reshape(B, H, W, C)
elif self.config.anchor_config.anchor_type == "cross-attention":
unmapped = unmapped.reshape(-1, C)
instances_in_view = instances_in_view.reshape(-1)
assert unmapped.shape[0] == instances_in_view.shape[0]
for i in range(features.shape[0]): # feature indices indicate instances
unmapped[instances_in_view == i+1] = features[seq_labels == i+1]
unmapped = unmapped.reshape(B, H, W, C)
return unmapped
def _apply_outputs(self, features, anchors, instances_in_view):
if self.config.anchor_config.anchor_type in ["self-attention", "mean"]:
B, H, W = instances_in_view.shape # NOTE instance_in_view must in shape (B, H, W)
instances_in_view = instances_in_view.reshape(-1) - 1 # instances are indexed from 0, -1 is the background
background_mask = instances_in_view == -1
anchor_features = anchors[instances_in_view.long(), :]
anchor_features[background_mask] = 0
anchor_features = anchor_features.reshape(B, H, W, -1)
else:
anchor_features = anchors
# output
features = features + anchor_features
return features
def _prepare_flash_attention_inputs(self, query, key, value):
query = query.reshape(-1, self.num_heads, self.hidden_size)
key = key.reshape(-1, self.num_heads, self.hidden_size)
value = value.reshape(-1, self.num_heads, self.hidden_size)
key_value = torch.stack([key, value], dim=1)
return query, key_value
def forward(self, anchors, features, instances_in_view):
assert len(anchors.shape) == 3, "anchors should be in shape (M, L, C)"
assert len(features.shape) == 4, "features should be in shape (B, H, W, C)"
if self.config.anchor_config.anchor_type == "self-attention":
query, key, value = self._map_inputs(anchors, anchors, anchors)
anchors, _ = self.attention(query, key, value)
anchors = self._map_outputs(anchors)
elif self.config.anchor_config.anchor_type == "cross-attention":
seq_features, seq_labels, seq_anchors, cu_seqlens, max_seqlen, cu_seqlens_k, max_seqlen_k, map_flag = self._map_features(features, anchors, instances_in_view, True)
if map_flag:
seq_features, seq_anchors, seq_anchors = self._map_inputs(seq_features, seq_anchors, seq_anchors)
seq_features, _ = self.attention(
seq_features,
seq_anchors,
seq_anchors
)
seq_features = self._map_outputs(seq_features)
seq_features = self._unmap_features(seq_features, seq_labels, instances_in_view)
anchors = seq_features
else:
anchors = features
elif self.config.anchor_config.anchor_type == "flash-attention":
seq_features, seq_labels, seq_anchors, cu_seqlens, max_seqlen, cu_seqlens_k, max_seqlen_k, map_flag = self._map_features(features, anchors, instances_in_view)
if map_flag:
seq_features, seq_anchors, seq_anchors = self._map_inputs(seq_features, seq_anchors, seq_anchors)
seq_query, seq_key_value = self._prepare_flash_attention_inputs(seq_features, seq_anchors, seq_anchors)
seq_features = self.attention(
seq_query.half(), # (Sq, H, C)
seq_key_value.half(), # (Sk, 2, H_k, C)
cu_seqlens=cu_seqlens, max_seqlen=max_seqlen,
cu_seqlens_k=cu_seqlens_k, max_seqlen_k=max_seqlen_k
).to(torch.float32) # (Sq, H, C)
seq_features = self._map_outputs(seq_features.reshape(seq_features.shape[0], -1)) # (Sq, C)
seq_features = self._unmap_features(seq_features, seq_labels, instances_in_view)
anchors = seq_features
else:
anchors = features
else:
anchors = anchors.mean(1)
# output
features = self._apply_outputs(features, anchors, instances_in_view)
return features
# Path: models/modules/studio.py
import os
import json
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
import torchvision
import numpy as np
import sys
from omegaconf import OmegaConf
from pytorch3d.ops import interpolate_face_attributes
from pytorch3d.renderer import look_at_view_transform
from lib.camera_helper import init_trajectory, init_blenderproc_trajectory, init_camera_R_T
from lib.render_helper import init_renderer
from lib.shading_helper import init_flat_texel_shader
from lib.projection_helper import get_visible_pixel_uvs, get_all_4_locations
from models.modules.modules import MLP, Siren, HashGrid, HashGridMLP
from models.modules.anchors import AnchorTransformer
)
azim_linspace = np.linspace(
self.sphere_cameras.azim.min,
self.sphere_cameras.azim.max,
1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,
)
fov_linspace = np.linspace(
self.sphere_cameras.fov.min,
self.sphere_cameras.fov.max,
1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,
)
at = np.array(self.sphere_cameras.at)
combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)
dist_list = combinations[:, 0].tolist()
elev_list = combinations[:, 1].tolist()
azim_list = combinations[:, 2].tolist()
self.Rs, self.Ts = init_trajectory(dist_list, elev_list, azim_list, at)
self.fov_list = combinations[:, 3].tolist()
self.num_cameras = len(self.Rs)
print("=> using {} spherical cameras for training".format(self.num_cameras))
elif not self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:
poses = json.load(open(self.config.blenderproc_cameras))
self.Rs, self.Ts = init_blenderproc_trajectory(poses, self.device)
self.num_cameras = len(self.Rs)
self.fov_list = [self.config.fov] * self.num_cameras
print("=> using {} blenderproc cameras for training".format(self.num_cameras))
elif self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:
# spherical cameras
self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)
dist_linspace = np.linspace(
self.sphere_cameras.dist.min,
self.sphere_cameras.dist.max,
1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,
)
elev_linspace = np.linspace(
self.sphere_cameras.elev.min,
self.sphere_cameras.elev.max,
1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,
)
azim_linspace = np.linspace(
self.sphere_cameras.azim.min,
self.sphere_cameras.azim.max,
1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,
)
fov_linspace = np.linspace(
self.sphere_cameras.fov.min,
self.sphere_cameras.fov.max,
1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,
)
at = np.array(self.sphere_cameras.at)
combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)
dist_list = combinations[:, 0].tolist()
elev_list = combinations[:, 1].tolist()
azim_list = combinations[:, 2].tolist()
sphere_Rs, sphere_Ts = init_trajectory(dist_list, elev_list, azim_list, at)
sphere_fov_list = combinations[:, 3].tolist()
# blenderproc cameras
poses = json.load(open(self.config.blenderproc_cameras))
blenderproc_Rs, blenderproc_Ts = init_blenderproc_trajectory(poses, self.device)
blenderproc_fov_list = [self.config.fov] * len(blenderproc_Rs)
self.Rs = sphere_Rs + blenderproc_Rs
self.Ts = sphere_Ts + blenderproc_Ts
self.fov_list = sphere_fov_list + blenderproc_fov_list
self.num_cameras = len(self.Rs)
print("=> using {} spherical cameras and {} blenderproc cameras for training".format(len(sphere_Rs), len(blenderproc_Rs)))
# self.sphere_Rs = sphere_Rs
# self.sphere_Ts = sphere_Ts
# self.sphere_fov_list = sphere_fov_list
# self.num_sphere_cameras = len(self.sphere_Rs)
# self.Rs = sphere_Rs + blenderproc_Rs
# self.Ts = sphere_Ts + blenderproc_Ts
# self.fov_list = sphere_fov_list + blenderproc_fov_list
# self.num_cameras = len(self.Rs)
# print("=> using {} spherical cameras and {} blenderproc cameras for training".format(len(sphere_Rs), len(blenderproc_Rs)))
# print("=> using {} cameras before annealing and {} cameras afterwards".format(self.num_sphere_cameras, self.num_cameras))
else: # use fixed cameras
raise NotImplementedError
# for inference
# FIXME only support spherical cameras for now
# spherical cameras
self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)
dist_linspace = [self.sphere_cameras.dist.min] # always take the min dist from spherical cameras
elev_linspace = [self.config.elev]
azim_linspace = np.linspace(
self.config.azim[0],
self.config.azim[1],
self.config.log_latents_views,
)
fov_linspace = [self.config.fov]
at = np.array(self.sphere_cameras.at) # always take the cameras center from spherical cameras
combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)
self.inference_dist_list = combinations[:, 0].tolist()
self.inference_elev_list = combinations[:, 1].tolist()
self.inference_azim_list = combinations[:, 2].tolist()
self.inference_fov_list = combinations[:, 3].tolist()
self.inference_at = at
| self.num_inference_cameras = len(self.inference_dist_list) |
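The line marked with "|" just counts the rows of the combination grid. Throughout this module the camera poses are enumerated as the Cartesian product of the dist/elev/azim/fov linspaces via np.meshgrid(...).T.reshape(-1, 4); a small sketch of that construction (the values below are arbitrary):

import numpy as np

# Cartesian product of camera parameters, one row per (dist, elev, azim, fov).
dist_linspace = [1.0]
elev_linspace = [10.0, 20.0]
azim_linspace = np.linspace(0.0, 360.0, 4)
fov_linspace = [60.0]
combinations = np.array(
    np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)
).T.reshape(-1, 4)
print(combinations.shape)   # (8, 4): 1 * 2 * 4 * 1 poses, columns = dist, elev, azim, fov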
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: corl-team/xland-minigrid
# Path: src/xminigrid/core/constants.py
TILES_REGISTRY = []
# Path: src/xminigrid/core/constants.py
class Colors(struct.PyTreeNode):
EMPTY: int = struct.field(pytree_node=False, default=0)
END_OF_MAP: int = struct.field(pytree_node=False, default=1)
UNSEEN: int = struct.field(pytree_node=False, default=2)
RED: int = struct.field(pytree_node=False, default=3)
GREEN: int = struct.field(pytree_node=False, default=4)
BLUE: int = struct.field(pytree_node=False, default=5)
PURPLE: int = struct.field(pytree_node=False, default=6)
YELLOW: int = struct.field(pytree_node=False, default=7)
GREY: int = struct.field(pytree_node=False, default=8)
BLACK: int = struct.field(pytree_node=False, default=9)
ORANGE: int = struct.field(pytree_node=False, default=10)
WHITE: int = struct.field(pytree_node=False, default=11)
BROWN: int = struct.field(pytree_node=False, default=12)
PINK: int = struct.field(pytree_node=False, default=13)
# Path: src/xminigrid/core/constants.py
class Tiles(struct.PyTreeNode):
EMPTY: int = struct.field(pytree_node=False, default=0)
END_OF_MAP: int = struct.field(pytree_node=False, default=1)
UNSEEN: int = struct.field(pytree_node=False, default=2)
FLOOR: int = struct.field(pytree_node=False, default=3)
WALL: int = struct.field(pytree_node=False, default=4)
BALL: int = struct.field(pytree_node=False, default=5)
SQUARE: int = struct.field(pytree_node=False, default=6)
PYRAMID: int = struct.field(pytree_node=False, default=7)
GOAL: int = struct.field(pytree_node=False, default=8)
KEY: int = struct.field(pytree_node=False, default=9)
DOOR_LOCKED: int = struct.field(pytree_node=False, default=10)
DOOR_CLOSED: int = struct.field(pytree_node=False, default=11)
DOOR_OPEN: int = struct.field(pytree_node=False, default=12)
HEX: int = struct.field(pytree_node=False, default=13)
STAR: int = struct.field(pytree_node=False, default=14)
# Path: src/xminigrid/core/goals.py
class EmptyGoal(BaseGoal):
def __call__(self, grid, agent, action, position):
return jnp.asarray(False)
@classmethod
def decode(cls, encoding):
return cls()
def encode(self):
return jnp.zeros(MAX_GOAL_ENCODING_LEN, dtype=jnp.uint8)
# Path: src/xminigrid/core/grid.py
def equal(tile1: Tile, tile2: Tile) -> Tile:
# wait, is this just a jnp.array_equal?
return jnp.all(jnp.equal(tile1, tile2))
# Path: src/xminigrid/core/grid.py
def four_rooms(height: int, width: int) -> GridState:
wall_tile: Tile = TILES_REGISTRY[Tiles.WALL, Colors.GREY]
grid = empty_world(height, width)
grid = rectangle(grid, 0, 0, height, width, tile=wall_tile)
grid = vertical_line(grid, width // 2, 0, height, tile=wall_tile)
grid = horizontal_line(grid, 0, height // 2, width, tile=wall_tile)
return grid
# Path: src/xminigrid/core/grid.py
def horizontal_line(grid: GridState, x: int, y: int, length: int, tile: Tile) -> GridState:
grid = grid.at[y, x : x + length].set(tile)
return grid
# Path: src/xminigrid/core/grid.py
def nine_rooms(height: int, width: int) -> GridState:
wall_tile: Tile = TILES_REGISTRY[Tiles.WALL, Colors.GREY]
grid = empty_world(height, width)
grid = rectangle(grid, 0, 0, height, width, tile=wall_tile)
grid = vertical_line(grid, width // 3, 0, height, tile=wall_tile)
grid = vertical_line(grid, 2 * (width // 3), 0, height, tile=wall_tile)
grid = horizontal_line(grid, 0, height // 3, width, tile=wall_tile)
grid = horizontal_line(grid, 0, 2 * (height // 3), width, tile=wall_tile)
return grid
# Path: src/xminigrid/core/grid.py
def room(height: int, width: int) -> GridState:
grid = empty_world(height, width)
grid = rectangle(grid, 0, 0, height, width, tile=TILES_REGISTRY[Tiles.WALL, Colors.GREY])
return grid
# Path: src/xminigrid/core/grid.py
def sample_coordinates(key: jax.Array, grid: GridState, num: int, mask: jax.Array | None = None) -> jax.Array:
if mask is None:
mask = jnp.ones((grid.shape[0], grid.shape[1]), dtype=jnp.bool_)
coords = jax.random.choice(
key=key,
shape=(num,),
a=jnp.arange(grid.shape[0] * grid.shape[1]),
replace=False,
p=(mask & free_tiles_mask(grid)).flatten(),
)
coords = jnp.divmod(coords, grid.shape[1])
coords = jnp.concatenate((coords[0].reshape(-1, 1), coords[1].reshape(-1, 1)), axis=-1)
return coords
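sample_coordinates draws flat cell indices from the masked free tiles and converts them back to (row, col) pairs with divmod by the grid width. A minimal sketch of just that conversion (the flat indices below are made up):

import jax.numpy as jnp

# Flat indices of a 3 x 5 grid converted back to (row, col), as in
# sample_coordinates above.
W = 5
flat = jnp.asarray([0, 6, 14])
rows, cols = jnp.divmod(flat, W)
coords = jnp.concatenate((rows.reshape(-1, 1), cols.reshape(-1, 1)), axis=-1)
print(coords)   # [[0 0] [1 1] [2 4]]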
# Path: src/xminigrid/core/grid.py
def sample_direction(key: jax.Array) -> jax.Array:
return jax.random.randint(key, shape=(), minval=0, maxval=4)
# Path: src/xminigrid/core/grid.py
def two_rooms(height: int, width: int) -> GridState:
wall_tile: Tile = TILES_REGISTRY[Tiles.WALL, Colors.GREY]
grid = empty_world(height, width)
grid = rectangle(grid, 0, 0, height, width, tile=wall_tile)
grid = vertical_line(grid, width // 2, 0, height, tile=wall_tile)
return grid
# Path: src/xminigrid/core/grid.py
def vertical_line(grid: GridState, x: int, y: int, length: int, tile: Tile) -> GridState:
grid = grid.at[y : y + length, x].set(tile)
return grid
# Path: src/xminigrid/core/rules.py
class EmptyRule(BaseRule):
def __call__(self, grid, agent, action, position):
return grid, agent
@classmethod
def decode(cls, encoding):
return cls()
def encode(self):
return jnp.zeros(MAX_RULE_ENCODING_LEN, dtype=jnp.uint8)
# Path: src/xminigrid/environment.py
class Environment:
def default_params(self, **kwargs) -> EnvParams:
return EnvParams().replace(**kwargs)
def num_actions(self, params: EnvParams) -> int:
return int(NUM_ACTIONS)
def observation_shape(self, params: EnvParams) -> tuple[int, int, int]:
return (params.view_size, params.view_size, NUM_LAYERS)
# TODO: NOT sure that this should be hardcoded like that...
def time_limit(self, params: EnvParams) -> int:
return 3 * params.height * params.width
def _generate_problem(self, params: EnvParams, key: jax.Array) -> State:
return NotImplemented
def reset(self, params: EnvParams, key: jax.Array) -> TimeStep:
state = self._generate_problem(params, key)
timestep = TimeStep(
state=state,
step_type=StepType.FIRST,
reward=jnp.asarray(0.0),
discount=jnp.asarray(1.0),
observation=transparent_field_of_view(state.grid, state.agent, params.view_size, params.view_size),
)
return timestep
# Why timestep + state at once, and not like in Jumanji? To be able to do autoresets in gym and envpools styles
def step(self, params: EnvParams, timestep: TimeStep, action: int) -> TimeStep:
new_grid, new_agent, changed_position = take_action(timestep.state.grid, timestep.state.agent, action)
new_grid, new_agent = check_rule(timestep.state.rule_encoding, new_grid, new_agent, action, changed_position)
new_state = timestep.state.replace(
grid=new_grid,
agent=new_agent,
step_num=timestep.state.step_num + 1,
)
new_observation = transparent_field_of_view(new_state.grid, new_state.agent, params.view_size, params.view_size)
# checking for termination or truncation, choosing step type
terminated = check_goal(new_state.goal_encoding, new_state.grid, new_state.agent, action, changed_position)
truncated = jnp.equal(new_state.step_num, self.time_limit(params))
reward = jax.lax.select(terminated, 1.0 - 0.9 * (new_state.step_num / self.time_limit(params)), 0.0)
step_type = jax.lax.select(terminated | truncated, StepType.LAST, StepType.MID)
discount = jax.lax.select(terminated, jnp.asarray(0.0), jnp.asarray(1.0))
timestep = TimeStep(
state=new_state,
step_type=step_type,
reward=reward,
discount=discount,
observation=new_observation,
)
return timestep
def render(self, params: EnvParams, timestep: TimeStep):
if params.render_mode == "rgb_array":
return rgb_render(timestep.state.grid, timestep.state.agent, params.view_size)
elif params.render_mode == "rich_text":
return text_render(timestep.state.grid, timestep.state.agent)
else:
raise RuntimeError("Unknown render mode. Should be one of: ['rgb_array', 'rich_text']")
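# A minimal sketch of the shaped reward computed in Environment.step above,
# assuming the default 9x9 grid (so time_limit = 3 * 9 * 9 = 243); illustrative only.
time_limit = 3 * 9 * 9
for step_num in (1, 100, 243):
    reward = 1.0 - 0.9 * (step_num / time_limit)
    print(step_num, round(reward, 3))  # 0.996, 0.63, 0.1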
# Path: src/xminigrid/environment.py
class EnvParams(struct.PyTreeNode):
# WARN: pytree_node=False, so you CAN NOT vmap on them!
# You can add pytree node params, but be careful and
# test that your code will work under jit.
# Spoiler: probably it will not :(
height: int = struct.field(pytree_node=False, default=9)
width: int = struct.field(pytree_node=False, default=9)
view_size: int = struct.field(pytree_node=False, default=7)
render_mode: str = struct.field(pytree_node=False, default="rgb_array")
# Path: src/xminigrid/types.py
class AgentState(struct.PyTreeNode):
position: jax.Array = jnp.asarray((0, 0))
direction: jax.Array = jnp.asarray(0)
pocket: jax.Array = TILES_REGISTRY[Tiles.EMPTY, Colors.EMPTY]
# Path: src/xminigrid/types.py
class EnvCarry(struct.PyTreeNode):
...
# Path: src/xminigrid/types.py
class RuleSet(struct.PyTreeNode):
goal: jax.Array
rules: jax.Array
init_tiles: jax.Array
# Path: src/xminigrid/types.py
class State(struct.PyTreeNode):
key: jax.random.PRNGKey
step_num: jax.Array
grid: GridState
agent: AgentState
goal_encoding: jax.Array
rule_encoding: jax.Array
carry: EnvCarry
# Path: src/xminigrid/envs/xland.py
import jax
import jax.numpy as jnp
from flax import struct
from ..core.constants import TILES_REGISTRY, Colors, Tiles
from ..core.goals import EmptyGoal
from ..core.grid import (
equal,
four_rooms,
horizontal_line,
nine_rooms,
room,
sample_coordinates,
sample_direction,
two_rooms,
vertical_line,
)
from ..core.rules import EmptyRule
from ..environment import Environment, EnvParams
from ..types import AgentState, EnvCarry, RuleSet, State
_wall_tile = TILES_REGISTRY[Tiles.WALL, Colors.GREY]
# colors for doors between rooms
_allowed_colors = jnp.array(
(
Colors.RED,
Colors.GREEN,
Colors.BLUE,
Colors.PURPLE,
Colors.YELLOW,
Colors.GREY,
)
)
# helper functions to generate various maps, inspired by the common minigrid layouts
# TODO: all worlds should be square
def generate_room(key, height, width):
grid = room(height, width)
return key, grid
def generate_two_rooms(key, height, width):
key, color_key, door_key = jax.random.split(key, num=3)
color = jax.random.choice(color_key, _allowed_colors)
door_pos = jax.random.randint(door_key, shape=(), minval=1, maxval=height - 1)
grid = two_rooms(height, width)
grid = grid.at[door_pos, width // 2].set(TILES_REGISTRY[Tiles.DOOR_CLOSED, color])
return key, grid
def generate_four_rooms(key, height, width):
key, doors_key, colors_key = jax.random.split(key, num=3)
doors_offsets = jax.random.randint(doors_key, shape=(4,), minval=1, maxval=height // 2)
colors = jax.random.choice(colors_key, _allowed_colors, shape=(4,))
grid = four_rooms(height, width)
grid = grid.at[height // 2, doors_offsets[0]].set(TILES_REGISTRY[Tiles.DOOR_CLOSED, colors[0]])
grid = grid.at[height // 2, width // 2 + doors_offsets[1]].set(TILES_REGISTRY[Tiles.DOOR_CLOSED, colors[1]])
grid = grid.at[doors_offsets[2], width // 2].set(TILES_REGISTRY[Tiles.DOOR_CLOSED, colors[2]])
grid = grid.at[height // 2 + doors_offsets[3], width // 2].set(TILES_REGISTRY[Tiles.DOOR_CLOSED, colors[3]])
return key, grid
def generate_six_rooms(key, height, width):
key, colors_key = jax.random.split(key)
grid = room(height, width)
grid = vertical_line(grid, width // 2 - 2, 0, height, _wall_tile)
grid = vertical_line(grid, width // 2 + 2, 0, height, _wall_tile)
for i in range(1, 3):
grid = horizontal_line(grid, 0, i * (height // 3), width // 2 - 2, _wall_tile)
grid = horizontal_line(grid, width // 2 + 2, i * (height // 3), width // 2 - 2, _wall_tile)
doors_idxs = (
# left doors
(height // 2 - (height // 3), width // 2 - 2),
(height // 2, width // 2 - 2),
(height // 2 + (height // 3), width // 2 - 2),
# right doors
(height // 2 - (height // 3), width // 2 + 2),
(height // 2, width // 2 + 2),
(height // 2 + (height // 3), width // 2 + 2),
)
colors = jax.random.choice(colors_key, _allowed_colors, shape=(6,))
for i in range(6):
grid = grid.at[doors_idxs[i][0], doors_idxs[i][1]].set(TILES_REGISTRY[Tiles.DOOR_CLOSED, colors[i]])
return key, grid
def generate_nine_rooms(key, height, width):
# valid sizes should follow 3 * x + 4: 7, 10, 13, 16, 19, 22, 25, 28, 31, ...
# (size - 4) % 3 == 0
key, doors_key, colors_key = jax.random.split(key, num=3)
roomW, roomH = width // 3, height // 3
grid = nine_rooms(height, width)
# assuming that rooms are square!
door_coords = jax.random.randint(doors_key, shape=(12,), minval=1, maxval=roomW)
colors = jax.random.choice(colors_key, _allowed_colors, shape=(12,))
# adapted from minigrid playground
door_idx = 0
for i in range(0, 3):
for j in range(0, 3):
xL = i * roomW
yT = j * roomH
xR = xL + roomW
yB = yT + roomH
if i + 1 < 3:
grid = grid.at[yT + door_coords[door_idx], xR].set(TILES_REGISTRY[Tiles.DOOR_CLOSED, colors[door_idx]])
door_idx = door_idx + 1
if j + 1 < 3:
grid = grid.at[yB, xL + door_coords[door_idx]].set(TILES_REGISTRY[Tiles.DOOR_CLOSED, colors[door_idx]])
door_idx = door_idx + 1
return key, grid
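# A hypothetical check mirroring the size comment in generate_nine_rooms above:
# nine-room layouts expect sizes of the form 3 * x + 4, i.e. (size - 4) % 3 == 0.
def _is_valid_nine_rooms_size(size: int) -> bool:
    return size >= 7 and (size - 4) % 3 == 0

assert all(_is_valid_nine_rooms_size(s) for s in (7, 10, 13, 16, 19))
assert not _is_valid_nine_rooms_size(9)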
class XLandMiniGridEnvOptions(EnvParams):
# you can vmap on rulesets for multi-task/meta learning
ruleset: RuleSet = struct.field(pytree_node=True, default=_empty_ruleset)
# experimental (can not vmap on it)
    grid_type: str = struct.field(pytree_node=False, default="1R")
class XLandMiniGrid(Environment):
def default_params(self, **kwargs) -> XLandMiniGridEnvOptions:
default_params = XLandMiniGridEnvOptions(view_size=5)
return default_params.replace(**kwargs)
| def time_limit(self, params: XLandMiniGridEnvOptions) -> int: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Vchitect/VBench
# Path: vbench/third_party/grit_src/grit/modeling/roi_heads/grit_fast_rcnn.py
class GRiTFastRCNNOutputLayers(FastRCNNOutputLayers):
@configurable
def __init__(
self,
input_shape: ShapeSpec,
**kwargs,
):
super().__init__(
input_shape=input_shape,
**kwargs,
)
input_size = input_shape.channels * \
(input_shape.width or 1) * (input_shape.height or 1)
self.bbox_pred = nn.Sequential(
nn.Linear(input_size, input_size),
nn.ReLU(inplace=True),
nn.Linear(input_size, 4)
)
weight_init.c2_xavier_fill(self.bbox_pred[0])
nn.init.normal_(self.bbox_pred[-1].weight, std=0.001)
nn.init.constant_(self.bbox_pred[-1].bias, 0)
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg, input_shape)
return ret
def losses(self, predictions, proposals):
scores, proposal_deltas = predictions
gt_classes = (
cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
)
num_classes = self.num_classes
_log_classification_stats(scores, gt_classes)
if len(proposals):
proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4
assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
gt_boxes = cat(
[(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
dim=0,
)
else:
proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes)
return {
"loss_cls": loss_cls,
"loss_box_reg": self.box_reg_loss(
proposal_boxes, gt_boxes, proposal_deltas, gt_classes,
num_classes=num_classes)
}
def softmax_cross_entropy_loss(self, pred_class_logits, gt_classes):
if pred_class_logits.numel() == 0:
return pred_class_logits.new_zeros([1])[0]
loss = F.cross_entropy(
pred_class_logits, gt_classes, reduction="mean")
return loss
def box_reg_loss(
self, proposal_boxes, gt_boxes, pred_deltas, gt_classes,
num_classes=-1):
num_classes = num_classes if num_classes > 0 else self.num_classes
box_dim = proposal_boxes.shape[1]
fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < num_classes))[0]
if pred_deltas.shape[1] == box_dim:
fg_pred_deltas = pred_deltas[fg_inds]
else:
fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
fg_inds, gt_classes[fg_inds]
]
if self.box_reg_loss_type == "smooth_l1":
gt_pred_deltas = self.box2box_transform.get_deltas(
proposal_boxes[fg_inds],
gt_boxes[fg_inds],
)
loss_box_reg = smooth_l1_loss(
fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum"
)
elif self.box_reg_loss_type == "giou":
fg_pred_boxes = self.box2box_transform.apply_deltas(
fg_pred_deltas, proposal_boxes[fg_inds]
)
loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
else:
raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
return loss_box_reg / max(gt_classes.numel(), 1.0)
def predict_probs(self, predictions, proposals):
scores = predictions[0]
num_inst_per_image = [len(p) for p in proposals]
probs = F.softmax(scores, dim=-1)
return probs.split(num_inst_per_image, dim=0)
def forward(self, x):
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
scores = []
cls_scores = self.cls_score(x)
scores.append(cls_scores)
scores = torch.cat(scores, dim=1)
proposal_deltas = self.bbox_pred(x)
return scores, proposal_deltas
# Path: vbench/third_party/grit_src/grit/modeling/text/text_decoder.py
class TransformerDecoderTextualHead(TextualHead):
def __init__(
self,
object_feature_size: int,
vocab_size: int,
hidden_size: int,
num_layers: int,
attention_heads: int,
feedforward_size: int,
dropout: float = 0.1,
norm_type: str = "post",
mask_future_positions: bool = True,
max_caption_length: int = 1024,
padding_idx: int = 0,
decoder_type=None,
not_tie_weight=None,
output_hidden_states=None,
use_mlp_wrapper=None,
use_act_checkpoint=True,
):
super().__init__(object_feature_size, vocab_size, hidden_size)
self.num_layers = num_layers
self.attention_heads = attention_heads
self.feedforward_size = feedforward_size
self.dropout = dropout
assert mask_future_positions
self.padding_idx = padding_idx
self.object_feature_projection = nn.Sequential(
nn.Linear(object_feature_size, self.textual_feature_size),
nn.LayerNorm(self.textual_feature_size))
self.embedding = WordAndPositionalEmbedding(
self.vocab_size,
self.textual_feature_size,
dropout=dropout,
max_caption_length=max_caption_length,
padding_idx=padding_idx,
)
self.transformer = create_transformer(
decoder_type=decoder_type,
norm_type=norm_type,
textual_feature_size=self.textual_feature_size,
attention_heads=self.attention_heads,
feedforward_size=self.feedforward_size,
dropout=dropout,
num_layers=self.num_layers,
output_hidden_states=output_hidden_states,
use_mlp_wrapper=use_mlp_wrapper,
use_act_checkpoint=use_act_checkpoint,
)
self.apply(self._init_weights)
# Create an output linear layer and tie the input and output word
        # embeddings to reduce parameters.
self.output = nn.Linear(self.textual_feature_size, vocab_size)
if not not_tie_weight:
self.output.weight = self.embedding.words.weight
@staticmethod
def _init_weights(module):
"""Initialize weights like BERT - N(0.0, 0.02), bias = 0."""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.MultiheadAttention):
module.in_proj_weight.data.normal_(mean=0.0, std=0.02)
module.out_proj.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def forward(
self,
hidden_states,
text_tokens,
):
projected_object_features = self.object_feature_projection(hidden_states) if hidden_states is not None else None
batch_size, max_text_length = text_tokens.size()
text_embeddings = self.embedding(text_tokens)
# An additive mask for masking the future (one direction).
uni_mask_zero_neg = self._generate_future_mask(
max_text_length, text_embeddings.dtype, text_embeddings.device
)
        # We transpose the first two dimensions of token embeddings and visual
        # features, as required by the decoder.
text_embeddings = text_embeddings.transpose(0, 1)
projected_object_features = projected_object_features.transpose(0, 1)
        # if the transformer here is the plain pytorch decoder, the output is
        # always a tensor, never a tuple
trans_out = self.transformer(
text_embeddings,
projected_object_features,
tgt_mask=uni_mask_zero_neg,
)
if isinstance(trans_out, tuple):
textual_features = trans_out[0]
else:
assert isinstance(trans_out, torch.Tensor)
textual_features = trans_out
# Undo the transpose and bring batch to dim 0.
# shape: (batch_size, max_caption_length, hidden_size)
textual_features = textual_features.transpose(0, 1)
# shape: (batch_size, max_caption_length, vocab_size)
output_logits = self.output(textual_features)
if isinstance(trans_out, tuple):
return output_logits, trans_out[1]
else:
return output_logits
def _generate_future_mask(
self, size: int, dtype: torch.dtype, device: torch.device
):
# Default mask is for forward direction. Flip for backward direction.
mask = torch.triu(
torch.ones(size, size, device=device, dtype=dtype), diagonal=1
)
mask = mask.masked_fill(mask == 1, float("-inf"))
return mask
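# A standalone look at the additive causal mask built by _generate_future_mask
# above, here for size 4 with plain PyTorch; illustrative only.
import torch

mask = torch.triu(torch.ones(4, 4), diagonal=1)
mask = mask.masked_fill(mask == 1, float("-inf"))
# tensor([[0., -inf, -inf, -inf],
#         [0.,   0., -inf, -inf],
#         [0.,   0.,   0., -inf],
#         [0.,   0.,   0.,   0.]])  -> each position may attend only to itself and the past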
# Path: vbench/third_party/grit_src/grit/modeling/text/text_decoder.py
class GRiTTextDecoder(nn.Module):
def __init__(
self,
transformer,
begin_token_id=101,
beamsearch_decode=None,
loss_type=None,
tokenizer=None,
):
super().__init__()
self.textual = transformer
self.padding_idx = self.textual.padding_idx
self.begin_token_id = begin_token_id
self.beamsearch_decode = beamsearch_decode
self.tokenizer = tokenizer
if loss_type is None:
self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_idx)
elif loss_type == 'smooth':
self.loss = SmoothLabelCrossEntropyLoss(ignore_index=self.padding_idx)
else:
raise NotImplementedError(loss_type)
def forward(self, batch):
object_features = batch['object_features']
if self.training:
caption_token_input = batch["text_tokens"]
output_logits = self.textual(
object_features,
caption_token_input,
)
if 'need_predict' in batch:
                # modifying in place should also work, but we avoid it for
                # safety as we may reuse the tokens in prediction results in the future
target = batch["text_tokens"].clone()
target[batch['need_predict'] == 0] = self.padding_idx
else:
target = batch["text_tokens"]
feat = output_logits[:, :-1].contiguous()
target = target[:, 1:].contiguous()
feat = feat.view(-1, self.textual.vocab_size)
target = target.view(-1)
valid_mask = target != self.padding_idx
target = target[valid_mask]
feat = feat[valid_mask]
loss = self.loss(feat, target)
return loss
else:
output_dict = self.infer(object_features)
return output_dict
def infer(self, object_features):
batch_size = object_features.size(0)
begin_tokens = object_features.new_full(
(batch_size, 1), self.begin_token_id
).long()
decoding_step = functools.partial(
self.decoding_step, object_features
)
object_description_tokens, logprobs = self.beamsearch_decode.search(
begin_tokens, decoding_step
)
output_dict = {
'predictions': object_description_tokens,
'logprobs': logprobs,
}
return output_dict
def decoding_step(self, object_features, partial_text):
batch_size = object_features.shape[0]
beam_size = int(partial_text.size(0) / batch_size)
if beam_size > 1:
batch_size, num_token, channels = object_features.size()
object_features = object_features.unsqueeze(1).repeat(1, beam_size, 1, 1)
object_features = object_features.view(
batch_size * beam_size, num_token, channels
)
text_lengths = torch.ones_like(partial_text)
if len(text_lengths.size()) != 2:
partial_text = partial_text.unsqueeze(1)
# shape: (batch_size * beam_size, partial_caption_length, vocab_size)
logits = self.textual(
object_features,
partial_text,
)
return logits[:, -1, :].float()
# Path: vbench/third_party/grit_src/grit/modeling/text/text_decoder.py
class AutoRegressiveBeamSearch(object):
def __init__(
self,
end_token_id: int,
max_steps: int = 50,
beam_size: int = 5,
objectdet=True,
per_node_beam_size: int = 2,
):
self._eos_index = end_token_id
self.max_steps = max_steps
self.beam_size = beam_size
self.objectdet = objectdet
self.per_node_beam_size = per_node_beam_size or beam_size
def search(self, begin_tokens, step):
if self.beam_size > 1 and self.objectdet:
only_return_best = False
else:
only_return_best = True
batch_size = begin_tokens.size()[0]
predictions = begin_tokens.unsqueeze(1).expand((batch_size, self.beam_size, begin_tokens.shape[-1]))
# Calculate the first timestep. This is done outside the main loop
# because we are going from a single decoder input (the output from the
# encoder) to the top `beam_size` decoder outputs. On the other hand,
# within the main loop we are going from the `beam_size` elements of the
# beam to `beam_size`^2 candidates from which we will select the top
# `beam_size` elements for the next iteration.
# shape: (batch_size, num_classes)
start_class_logits = step(begin_tokens)
# Convert logits to logprobs.
# shape: (batch_size * beam_size, vocab_size)
start_class_logprobs = F.log_softmax(start_class_logits, dim=1)
num_classes = start_class_logprobs.size()[1]
# shape: (batch_size, beam_size), (batch_size, beam_size)
start_top_logprobs, start_predicted_classes = start_class_logprobs.topk(
self.beam_size
)
if (
self.beam_size == 1
and (start_predicted_classes == self._eos_index).all()
):
warnings.warn(
"Empty object description predicted. You may want to increase beam"
"size or ensure your step function is working properly.",
RuntimeWarning,
)
if only_return_best:
return start_predicted_classes, start_top_logprobs
else:
return start_predicted_classes.unsqueeze(-1), start_top_logprobs
# The log probs for the last time step.
# shape: (batch_size, beam_size)
last_logprobs = start_top_logprobs
# shape: (batch_size, beam_size, sequence_length)
predictions = torch.cat([predictions, start_predicted_classes.unsqueeze(-1)], dim=-1)
# Log probability tensor that mandates that the end token is selected.
# shape: (batch_size * beam_size, num_classes)
logprobs_after_end = start_class_logprobs.new_full(
(batch_size * self.beam_size, num_classes), float("-inf")
)
logprobs_after_end[:, self._eos_index] = 0.0
logits_after_end = start_class_logprobs.new_full(
(batch_size * self.beam_size, num_classes), float("-inf")
)
logits_after_end[:, self._eos_index] = 0
while predictions.shape[-1] < self.max_steps:
# shape: (batch_size * beam_size,)
last_predictions = predictions[:, :, -1].reshape(batch_size * self.beam_size)
# If every predicted token from the last step is `self._eos_index`,
# then we can stop early.
if (last_predictions == self._eos_index).all():
break
predictions_so_far = predictions.view(
batch_size * self.beam_size, -1
)
# shape: (batch_size * beam_size, num_classes)
class_logits = step(predictions_so_far)
# Set logprobs of last predicted tokens as high negative value to avoid
# repetition in description.
class_logits = class_logits.scatter(1, predictions_so_far[:, -1].view((-1, 1)), -10000)
# shape: (batch_size * beam_size, num_classes)
last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
batch_size * self.beam_size, num_classes
)
# Here we are finding any beams where we predicted the end token in
# the previous timestep and replacing the distribution with a
# one-hot distribution, forcing the beam to predict the end token
# this timestep as well.
class_logits = torch.where(
last_predictions_expanded == self._eos_index,
logits_after_end,
class_logits,
)
# Convert logits to logprobs.
# shape: (batch_size * beam_size, vocab_size)
class_logprobs = F.log_softmax(class_logits, dim=1)
# shape (both): (batch_size * beam_size, per_node_beam_size)
top_logprobs, predicted_classes = class_logprobs.topk(
self.per_node_beam_size
)
# Here we expand the last log probs to `(batch_size * beam_size,
# per_node_beam_size)` so that we can add them to the current log
# probs for this timestep. This lets us maintain the log
# probability of each element on the beam.
# shape: (batch_size * beam_size, per_node_beam_size)
expanded_last_logprobs = (
last_logprobs.unsqueeze(2)
.expand(batch_size, self.beam_size, self.per_node_beam_size)
.reshape(batch_size * self.beam_size, self.per_node_beam_size)
)
# shape: (batch_size * beam_size, per_node_beam_size)
summed_top_logprobs = top_logprobs + expanded_last_logprobs
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_summed = summed_top_logprobs.reshape(
batch_size, self.beam_size * self.per_node_beam_size
)
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_predicted_classes = predicted_classes.reshape(
batch_size, self.beam_size * self.per_node_beam_size
)
# Append the predictions to the current beam.
reshaped_beam = (
predictions.view(batch_size * self.beam_size, 1, -1)
.repeat(1, self.per_node_beam_size, 1)
.reshape(batch_size, self.beam_size * self.per_node_beam_size, -1)
)
            # batch_size, (beam_size * per_node_beam_size), #token
reshaped_beam = torch.cat([reshaped_beam, reshaped_predicted_classes.unsqueeze(-1)], dim=-1)
# Keep only the top `beam_size` beam indices.
# shape: (batch_size, beam_size), (batch_size, beam_size)
restricted_beam_logprobs, restricted_beam_indices = reshaped_summed.topk(
self.beam_size
)
predictions = reshaped_beam.gather(
1, restricted_beam_indices.unsqueeze(-1).repeat(1,1,reshaped_beam.shape[-1])
)
# shape: (batch_size, beam_size)
last_logprobs = restricted_beam_logprobs
if not torch.isfinite(last_logprobs).all():
warnings.warn(
"Infinite log probs encountered. Some final descriptions may not "
"make sense. This can happen when the beam size is larger than"
" the number of valid (non-zero probability) transitions that "
"the step function produces.",
RuntimeWarning,
)
# Optionally select best beam and its logprobs.
if only_return_best:
# shape: (batch_size, sequence_length)
predictions = predictions[:, 0, :]
last_logprobs = last_logprobs[:, 0]
num_valid = (predictions != self._eos_index).sum(dim=-1)
num_valid += (predictions == self._eos_index).sum(dim=-1) > 0
num_valid = num_valid - begin_tokens.shape[1]
num_valid = num_valid.clip(min=1)
last_logprobs = last_logprobs / num_valid
return predictions, last_logprobs
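# A minimal sketch of the torch.where trick used above to pin finished beams to the
# end token; dummy shapes and token ids, not the GRiT vocabulary.
import torch

eos, num_classes = 2, 4
last_predictions = torch.tensor([0, 2, 1])  # the middle beam already emitted EOS
class_logits = torch.zeros(3, num_classes)
logits_after_end = torch.full((3, num_classes), float("-inf"))
logits_after_end[:, eos] = 0.0
forced = torch.where(
    last_predictions.unsqueeze(-1).expand(3, num_classes) == eos,
    logits_after_end,
    class_logits,
)
# rows 0 and 2 keep their logits; row 1 can only continue with EOS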
# Path: vbench/third_party/grit_src/grit/modeling/text/load_text_token.py
class LoadTextTokens(object):
def __init__(self, tokenizer, max_text_len=40, padding='do_not_pad'):
self.tokenizer = tokenizer
self.max_text_len = max_text_len
self.padding = padding
def descriptions_to_text_tokens(self, target, begin_token):
target_encoding = self.tokenizer(
target, padding=self.padding,
add_special_tokens=False,
truncation=True, max_length=self.max_text_len)
need_predict = [1] * len(target_encoding['input_ids'])
payload = target_encoding['input_ids']
if len(payload) > self.max_text_len - 2:
payload = payload[-(self.max_text_len - 2):]
            need_predict = need_predict[-(self.max_text_len - 2):]
input_ids = [begin_token] + payload + [self.tokenizer.sep_token_id]
need_predict = [0] + need_predict + [1]
data = {
'text_tokens': torch.tensor(input_ids),
'text_lengths': len(input_ids),
'need_predict': torch.tensor(need_predict),
}
return data
def __call__(self, object_descriptions, box_features, begin_token):
text_tokens = []
text_lengths = []
need_predict = []
for description in object_descriptions:
tokens = self.descriptions_to_text_tokens(description, begin_token)
text_tokens.append(tokens['text_tokens'])
text_lengths.append(tokens['text_lengths'])
need_predict.append(tokens['need_predict'])
text_tokens = torch.cat(self.collate(text_tokens), dim=0).to(box_features.device)
text_lengths = torch.tensor(text_lengths).to(box_features.device)
need_predict = torch.cat(self.collate(need_predict), dim=0).to(box_features.device)
assert text_tokens.dim() == 2 and need_predict.dim() == 2
data = {'text_tokens': text_tokens,
'text_lengths': text_lengths,
'need_predict': need_predict}
return data
def collate(self, batch):
if all(isinstance(b, torch.Tensor) for b in batch) and len(batch) > 0:
if not all(b.shape == batch[0].shape for b in batch[1:]):
assert all(len(b.shape) == len(batch[0].shape) for b in batch[1:])
shape = torch.tensor([b.shape for b in batch])
max_shape = tuple(shape.max(dim=0)[0].tolist())
batch2 = []
for b in batch:
if any(c < m for c, m in zip(b.shape, max_shape)):
b2 = torch.zeros(max_shape, dtype=b.dtype, device=b.device)
if b.dim() == 1:
b2[:b.shape[0]] = b
elif b.dim() == 2:
b2[:b.shape[0], :b.shape[1]] = b
elif b.dim() == 3:
b2[:b.shape[0], :b.shape[1], :b.shape[2]] = b
else:
raise NotImplementedError
b = b2
batch2.append(b[None, ...])
else:
batch2 = []
for b in batch:
batch2.append(b[None, ...])
return batch2
else:
raise NotImplementedError
# Path: vbench/third_party/grit_src/grit/data/custom_dataset_mapper.py
class ObjDescription:
def __init__(self, object_descriptions):
self.data = object_descriptions
def __getitem__(self, item):
assert type(item) == torch.Tensor
assert item.dim() == 1
if len(item) > 0:
assert item.dtype == torch.int64 or item.dtype == torch.bool
if item.dtype == torch.int64:
return ObjDescription([self.data[x.item()] for x in item])
elif item.dtype == torch.bool:
return ObjDescription(list(compress(self.data, item)))
return ObjDescription(list(compress(self.data, item)))
def __len__(self):
return len(self.data)
def __repr__(self):
return "ObjDescription({})".format(self.data)
# Path: vbench/third_party/grit_src/grit/modeling/soft_nms.py
def batched_soft_nms(
boxes, scores, idxs, method, gaussian_sigma, linear_threshold, prune_threshold
):
"""
Performs soft non-maximum suppression in a batched fashion.
    Each index value corresponds to a category, and NMS
will not be applied between elements of different categories.
Args:
boxes (Tensor[N, 4]):
boxes where NMS will be performed. They
are expected to be in (x1, y1, x2, y2) format
scores (Tensor[N]):
scores for each one of the boxes
idxs (Tensor[N]):
indices of the categories for each one of the boxes.
method (str):
one of ['gaussian', 'linear', 'hard']
            see paper for details. Users are encouraged not to use "hard", as this is the
same nms available elsewhere in detectron2
gaussian_sigma (float):
parameter for Gaussian penalty function
linear_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
prune_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
if boxes.numel() == 0:
return (
torch.empty((0,), dtype=torch.int64, device=boxes.device),
torch.empty((0,), dtype=torch.float32, device=scores.device),
)
    # strategy: in order to perform NMS independently per class,
    # we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
return soft_nms(
boxes_for_nms, scores, method, gaussian_sigma, linear_threshold, prune_threshold
)
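# A small sketch of the per-class offset trick used above: boxes get a class-dependent
# shift so boxes from different classes can never overlap during NMS. Dummy values only.
import torch

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 11.0, 11.0]])
idxs = torch.tensor([0, 1])
offsets = idxs.to(boxes) * (boxes.max() + 1)  # [0., 12.]
boxes_for_nms = boxes + offsets[:, None]
# tensor([[ 0.,  0., 10., 10.],
#         [13., 13., 23., 23.]])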
# Path: vbench/third_party/grit_src/grit/modeling/roi_heads/grit_roi_heads.py
import math
import torch
import logging
from typing import Dict, List, Optional, Tuple, Union
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads, _ScaleGradient
from detectron2.modeling.poolers import ROIPooler
from detectron2.layers import batched_nms
from .grit_fast_rcnn import GRiTFastRCNNOutputLayers
from ..text.text_decoder import TransformerDecoderTextualHead, GRiTTextDecoder, AutoRegressiveBeamSearch
from ..text.load_text_token import LoadTextTokens
from transformers import BertTokenizer
from vbench.third_party.grit_src.grit.data.custom_dataset_mapper import ObjDescription
from ..soft_nms import batched_soft_nms
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class GRiTROIHeadsAndTextDecoder(CascadeROIHeads):
@configurable
def __init__(
self,
*,
text_decoder_transformer,
train_task: list,
test_task: str,
mult_proposal_score: bool = False,
mask_weight: float = 1.0,
object_feat_pooler=None,
soft_nms_enabled=False,
beam_size=1,
**kwargs,
):
super().__init__(**kwargs)
self.mult_proposal_score = mult_proposal_score
self.mask_weight = mask_weight
self.object_feat_pooler = object_feat_pooler
self.soft_nms_enabled = soft_nms_enabled
self.test_task = test_task
self.beam_size = beam_size
| tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: 16lemoing/dot
# Path: dot/models/shelf/cotracker_utils/models/core/cotracker/cotracker.py
class CoTracker(nn.Module):
def __init__(
self,
S=8,
stride=8,
add_space_attn=True,
num_heads=8,
hidden_size=384,
space_depth=12,
time_depth=12,
):
super(CoTracker, self).__init__()
self.S = S
self.stride = stride
self.hidden_dim = 256
self.latent_dim = latent_dim = 128
self.corr_levels = 4
self.corr_radius = 3
self.add_space_attn = add_space_attn
self.fnet = BasicEncoder(
output_dim=self.latent_dim, norm_fn="instance", dropout=0, stride=stride
)
self.updateformer = UpdateFormer(
space_depth=space_depth,
time_depth=time_depth,
input_dim=456,
hidden_size=hidden_size,
num_heads=num_heads,
output_dim=latent_dim + 2,
mlp_ratio=4.0,
add_space_attn=add_space_attn,
)
self.norm = nn.GroupNorm(1, self.latent_dim)
self.ffeat_updater = nn.Sequential(
nn.Linear(self.latent_dim, self.latent_dim),
nn.GELU(),
)
self.vis_predictor = nn.Sequential(
nn.Linear(self.latent_dim, 1),
)
def forward_iteration(
self,
fmaps,
coords_init,
feat_init=None,
vis_init=None,
track_mask=None,
iters=4,
):
B, S_init, N, D = coords_init.shape
assert D == 2
assert B == 1
B, S, __, H8, W8 = fmaps.shape
device = fmaps.device
if S_init < S:
coords = torch.cat(
[coords_init, coords_init[:, -1].repeat(1, S - S_init, 1, 1)], dim=1
)
vis_init = torch.cat(
[vis_init, vis_init[:, -1].repeat(1, S - S_init, 1, 1)], dim=1
)
else:
coords = coords_init.clone()
fcorr_fn = CorrBlock(
fmaps, num_levels=self.corr_levels, radius=self.corr_radius
)
ffeats = feat_init.clone()
times_ = torch.linspace(0, S - 1, S).reshape(1, S, 1)
pos_embed = sample_pos_embed(
grid_size=(H8, W8),
embed_dim=456,
coords=coords,
)
pos_embed = rearrange(pos_embed, "b e n -> (b n) e").unsqueeze(1)
times_embed = (
torch.from_numpy(get_1d_sincos_pos_embed_from_grid(456, times_[0]))[None]
.repeat(B, 1, 1)
.float()
.to(device)
)
coord_predictions = []
for __ in range(iters):
coords = coords.detach()
fcorr_fn.corr(ffeats)
fcorrs = fcorr_fn.sample(coords) # B, S, N, LRR
LRR = fcorrs.shape[3]
fcorrs_ = fcorrs.permute(0, 2, 1, 3).reshape(B * N, S, LRR)
flows_ = (coords - coords[:, 0:1]).permute(0, 2, 1, 3).reshape(B * N, S, 2)
flows_cat = get_2d_embedding(flows_, 64, cat_coords=True)
ffeats_ = ffeats.permute(0, 2, 1, 3).reshape(B * N, S, self.latent_dim)
if track_mask.shape[1] < vis_init.shape[1]:
track_mask = torch.cat(
[
track_mask,
torch.zeros_like(track_mask[:, 0]).repeat(
1, vis_init.shape[1] - track_mask.shape[1], 1, 1
),
],
dim=1,
)
concat = (
torch.cat([track_mask, vis_init], dim=2)
.permute(0, 2, 1, 3)
.reshape(B * N, S, 2)
)
transformer_input = torch.cat([flows_cat, fcorrs_, ffeats_, concat], dim=2)
x = transformer_input + pos_embed + times_embed
x = rearrange(x, "(b n) t d -> b n t d", b=B)
delta = self.updateformer(x)
delta = rearrange(delta, " b n t d -> (b n) t d")
delta_coords_ = delta[:, :, :2]
delta_feats_ = delta[:, :, 2:]
delta_feats_ = delta_feats_.reshape(B * N * S, self.latent_dim)
ffeats_ = ffeats.permute(0, 2, 1, 3).reshape(B * N * S, self.latent_dim)
ffeats_ = self.ffeat_updater(self.norm(delta_feats_)) + ffeats_
ffeats = ffeats_.reshape(B, N, S, self.latent_dim).permute(
0, 2, 1, 3
) # B,S,N,C
coords = coords + delta_coords_.reshape(B, N, S, 2).permute(0, 2, 1, 3)
coord_predictions.append(coords * self.stride)
vis_e = self.vis_predictor(ffeats.reshape(B * S * N, self.latent_dim)).reshape(
B, S, N
)
return coord_predictions, vis_e, feat_init
def forward(self, rgbs, queries, iters=4, cached_feat=None, feat_init=None, is_train=False):
B, T, C, H, W = rgbs.shape
B, N, __ = queries.shape
device = rgbs.device
assert B == 1
# INIT for the first sequence
        # We want to sort points by the first frame they are visible to add them to the tensor of tracked points consecutively
first_positive_inds = queries[:, :, 0].long()
__, sort_inds = torch.sort(first_positive_inds[0], dim=0, descending=False)
inv_sort_inds = torch.argsort(sort_inds, dim=0)
first_positive_sorted_inds = first_positive_inds[0][sort_inds]
assert torch.allclose(
first_positive_inds[0], first_positive_inds[0][sort_inds][inv_sort_inds]
)
coords_init = queries[:, :, 1:].reshape(B, 1, N, 2).repeat(
1, self.S, 1, 1
) / float(self.stride)
rgbs = 2 * rgbs - 1.0
traj_e = torch.zeros((B, T, N, 2), device=device)
vis_e = torch.zeros((B, T, N), device=device)
ind_array = torch.arange(T, device=device)
ind_array = ind_array[None, :, None].repeat(B, 1, N)
track_mask = (ind_array >= first_positive_inds[:, None, :]).unsqueeze(-1)
        # these are logits, so we initialize visibility with something that would give a value close to 1 after sigmoid
vis_init = torch.ones((B, self.S, N, 1), device=device).float() * 10
ind = 0
track_mask_ = track_mask[:, :, sort_inds].clone()
coords_init_ = coords_init[:, :, sort_inds].clone()
vis_init_ = vis_init[:, :, sort_inds].clone()
prev_wind_idx = 0
fmaps_ = None
vis_predictions = []
coord_predictions = []
wind_inds = []
while ind < T - self.S // 2:
rgbs_seq = rgbs[:, ind : ind + self.S]
S = S_local = rgbs_seq.shape[1]
if cached_feat is None:
if S < self.S:
rgbs_seq = torch.cat(
[rgbs_seq, rgbs_seq[:, -1, None].repeat(1, self.S - S, 1, 1, 1)],
dim=1,
)
S = rgbs_seq.shape[1]
rgbs_ = rgbs_seq.reshape(B * S, C, H, W)
if fmaps_ is None:
fmaps_ = self.fnet(rgbs_)
else:
fmaps_ = torch.cat(
[fmaps_[self.S // 2 :], self.fnet(rgbs_[self.S // 2 :])], dim=0
)
fmaps = fmaps_.reshape(
B, S, self.latent_dim, H // self.stride, W // self.stride
)
else:
fmaps = cached_feat[:, ind : ind + self.S]
if S < self.S:
fmaps = torch.cat(
[fmaps, fmaps[:, -1, None].repeat(1, self.S - S, 1, 1, 1)],
dim=1,
)
curr_wind_points = torch.nonzero(first_positive_sorted_inds < ind + self.S)
if curr_wind_points.shape[0] == 0:
ind = ind + self.S // 2
continue
wind_idx = curr_wind_points[-1] + 1
if wind_idx - prev_wind_idx > 0:
fmaps_sample = fmaps[
:, first_positive_sorted_inds[prev_wind_idx:wind_idx] - ind
]
feat_init_ = bilinear_sample2d(
fmaps_sample,
coords_init_[:, 0, prev_wind_idx:wind_idx, 0],
coords_init_[:, 0, prev_wind_idx:wind_idx, 1],
).permute(0, 2, 1)
feat_init_ = feat_init_.unsqueeze(1).repeat(1, self.S, 1, 1)
feat_init = smart_cat(feat_init, feat_init_, dim=2)
if prev_wind_idx > 0:
new_coords = coords[-1][:, self.S // 2 :] / float(self.stride)
coords_init_[:, : self.S // 2, :prev_wind_idx] = new_coords
coords_init_[:, self.S // 2 :, :prev_wind_idx] = new_coords[
:, -1
].repeat(1, self.S // 2, 1, 1)
new_vis = vis[:, self.S // 2 :].unsqueeze(-1)
vis_init_[:, : self.S // 2, :prev_wind_idx] = new_vis
vis_init_[:, self.S // 2 :, :prev_wind_idx] = new_vis[:, -1].repeat(
1, self.S // 2, 1, 1
)
coords, vis, __ = self.forward_iteration(
fmaps=fmaps,
coords_init=coords_init_[:, :, :wind_idx],
feat_init=feat_init[:, :, :wind_idx],
vis_init=vis_init_[:, :, :wind_idx],
track_mask=track_mask_[:, ind : ind + self.S, :wind_idx],
iters=iters,
)
if is_train:
vis_predictions.append(torch.sigmoid(vis[:, :S_local]))
coord_predictions.append([coord[:, :S_local] for coord in coords])
wind_inds.append(wind_idx)
traj_e[:, ind : ind + self.S, :wind_idx] = coords[-1][:, :S_local]
vis_e[:, ind : ind + self.S, :wind_idx] = vis[:, :S_local]
track_mask_[:, : ind + self.S, :wind_idx] = 0.0
ind = ind + self.S // 2
prev_wind_idx = wind_idx
traj_e = traj_e[:, :, inv_sort_inds]
vis_e = vis_e[:, :, inv_sort_inds]
vis_e = torch.sigmoid(vis_e)
train_data = (
(vis_predictions, coord_predictions, wind_inds, sort_inds)
if is_train
else None
)
return traj_e, feat_init, vis_e, train_data
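# The sort / inverse-sort bookkeeping used in CoTracker.forward above, in isolation;
# the toy values are illustrative only.
import torch

first_frames = torch.tensor([5, 0, 3])
_, sort_inds = torch.sort(first_frames, descending=False)
inv_sort_inds = torch.argsort(sort_inds)
assert torch.equal(first_frames[sort_inds][inv_sort_inds], first_frames)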
# Path: dot/models/shelf/cotracker_utils/models/core/cotracker/cotracker.py
def get_points_on_a_grid(grid_size, interp_shape, grid_center=(0, 0), device="cpu"):
if grid_size == 1:
return torch.tensor([interp_shape[1] / 2, interp_shape[0] / 2], device=device)[
None, None
]
grid_y, grid_x = meshgrid2d(
1, grid_size, grid_size, stack=False, norm=False, device=device
)
step = interp_shape[1] // 64
if grid_center[0] != 0 or grid_center[1] != 0:
grid_y = grid_y - grid_size / 2.0
grid_x = grid_x - grid_size / 2.0
grid_y = step + grid_y.reshape(1, -1) / float(grid_size - 1) * (
interp_shape[0] - step * 2
)
grid_x = step + grid_x.reshape(1, -1) / float(grid_size - 1) * (
interp_shape[1] - step * 2
)
grid_y = grid_y + grid_center[0]
grid_x = grid_x + grid_center[1]
xy = torch.stack([grid_x, grid_y], dim=-1).to(device)
return xy
# Path: dot/models/shelf/cotracker_utils/models/evaluation_predictor.py
import torch
import torch.nn.functional as F
from typing import Tuple
from dot.models.shelf.cotracker_utils.models.core.cotracker.cotracker import CoTracker, get_points_on_a_grid
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class EvaluationPredictor(torch.nn.Module):
def __init__(
self,
cotracker_model: CoTracker,
interp_shape: Tuple[int, int] = (384, 512),
grid_size: int = 6,
local_grid_size: int = 6,
single_point: bool = True,
n_iters: int = 6,
) -> None:
super(EvaluationPredictor, self).__init__()
self.grid_size = grid_size
self.local_grid_size = local_grid_size
self.single_point = single_point
self.interp_shape = interp_shape
self.n_iters = n_iters
self.model = cotracker_model
self.model.eval()
def forward(self, video, queries):
queries = queries.clone()
B, T, C, H, W = video.shape
B, N, D = queries.shape
assert D == 3
assert B == 1
rgbs = video.reshape(B * T, C, H, W)
rgbs = F.interpolate(rgbs, tuple(self.interp_shape), mode="bilinear")
rgbs = rgbs.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1])
device = rgbs.device
queries[:, :, 1] *= self.interp_shape[1] / W
queries[:, :, 2] *= self.interp_shape[0] / H
if self.single_point:
traj_e = torch.zeros((B, T, N, 2), device=device)
vis_e = torch.zeros((B, T, N), device=device)
            for pind in range(N):
query = queries[:, pind : pind + 1]
t = query[0, 0, 0].long()
traj_e_pind, vis_e_pind = self._process_one_point(rgbs, query)
traj_e[:, t:, pind : pind + 1] = traj_e_pind[:, :, :1]
vis_e[:, t:, pind : pind + 1] = vis_e_pind[:, :, :1]
else:
if self.grid_size > 0:
xy = get_points_on_a_grid(self.grid_size, rgbs.shape[3:], device=device)
xy = torch.cat([torch.zeros_like(xy[:, :, :1]), xy], dim=2).to(
device
) #
queries = torch.cat([queries, xy], dim=1) #
traj_e, __, vis_e, __ = self.model(
rgbs=rgbs,
queries=queries,
iters=self.n_iters,
)
traj_e[:, :, :, 0] *= W / float(self.interp_shape[1])
traj_e[:, :, :, 1] *= H / float(self.interp_shape[0])
return traj_e, vis_e
def _process_one_point(self, rgbs, query):
t = query[0, 0, 0].long()
| device = rgbs.device |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: cswry/SeeSR
# Path: basicsr/data/degradations.py
def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
"""2D sinc filter
Reference: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter
Args:
cutoff (float): cutoff frequency in radians (pi is max)
kernel_size (int): horizontal and vertical size, must be odd.
pad_to (int): pad kernel size to desired size, must be odd or zero.
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
kernel = np.fromfunction(
lambda x, y: cutoff * special.j1(cutoff * np.sqrt(
(x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(
(x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)), [kernel_size, kernel_size])
kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)
kernel = kernel / np.sum(kernel)
if pad_to > kernel_size:
pad_size = (pad_to - kernel_size) // 2
kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
return kernel
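# A short usage sketch for the sinc kernel above, assuming basicsr is installed;
# padding with zeros keeps the kernel normalized.
import numpy as np
from basicsr.data.degradations import circular_lowpass_kernel

kernel = circular_lowpass_kernel(cutoff=np.pi / 3, kernel_size=13, pad_to=21)
# kernel.shape == (21, 21) and kernel.sum() is still ~1.0 after zero padding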
# Path: basicsr/data/degradations.py
def random_mixed_kernels(kernel_list,
kernel_prob,
kernel_size=21,
sigma_x_range=(0.6, 5),
sigma_y_range=(0.6, 5),
rotation_range=(-math.pi, math.pi),
betag_range=(0.5, 8),
betap_range=(0.5, 8),
noise_range=None,
return_sigma=False):
"""Randomly generate mixed kernels.
Args:
kernel_list (tuple): a list name of kernel types,
support ['iso', 'aniso', 'skew', 'generalized', 'plateau_iso',
'plateau_aniso']
kernel_prob (tuple): corresponding kernel probability for each
kernel type
kernel_size (int):
sigma_x_range (tuple): [0.6, 5]
sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
beta_range (tuple): [0.5, 8]
noise_range(tuple, optional): multiplicative kernel noise,
[0.75, 1.25]. Default: None
Returns:
kernel (ndarray):
"""
kernel_type = random.choices(kernel_list, kernel_prob)[0]
if not return_sigma:
if kernel_type == 'iso':
kernel = random_bivariate_Gaussian(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True, return_sigma=return_sigma)
elif kernel_type == 'aniso':
kernel = random_bivariate_Gaussian(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False, return_sigma=return_sigma)
elif kernel_type == 'generalized_iso':
kernel = random_bivariate_generalized_Gaussian(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
betag_range,
noise_range=noise_range,
isotropic=True,
return_sigma=return_sigma)
elif kernel_type == 'generalized_aniso':
kernel = random_bivariate_generalized_Gaussian(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
betag_range,
noise_range=noise_range,
isotropic=False,
return_sigma=return_sigma)
elif kernel_type == 'plateau_iso':
kernel = random_bivariate_plateau(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True, return_sigma=return_sigma)
elif kernel_type == 'plateau_aniso':
kernel = random_bivariate_plateau(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False, return_sigma=return_sigma)
return kernel
else:
if kernel_type == 'iso':
kernel, sigma_list = random_bivariate_Gaussian(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True, return_sigma=return_sigma)
elif kernel_type == 'aniso':
kernel, sigma_list = random_bivariate_Gaussian(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False, return_sigma=return_sigma)
elif kernel_type == 'generalized_iso':
kernel, sigma_list = random_bivariate_generalized_Gaussian(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
betag_range,
noise_range=noise_range,
isotropic=True,
return_sigma=return_sigma)
elif kernel_type == 'generalized_aniso':
kernel, sigma_list = random_bivariate_generalized_Gaussian(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
betag_range,
noise_range=noise_range,
isotropic=False,
return_sigma=return_sigma)
elif kernel_type == 'plateau_iso':
kernel, sigma_list = random_bivariate_plateau(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True, return_sigma=return_sigma)
elif kernel_type == 'plateau_aniso':
kernel, sigma_list = random_bivariate_plateau(
kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False, return_sigma=return_sigma)
return kernel, sigma_list
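# An illustrative usage sketch for random_mixed_kernels above (basicsr assumed installed);
# the ranges simply echo the defaults documented in the docstring.
import math
from basicsr.data.degradations import random_mixed_kernels

kernel = random_mixed_kernels(
    kernel_list=['iso', 'aniso'],
    kernel_prob=[0.5, 0.5],
    kernel_size=21,
    sigma_x_range=(0.6, 5),
    sigma_y_range=(0.6, 5),
    rotation_range=(-math.pi, math.pi),
    betag_range=(0.5, 8),
    betap_range=(0.5, 8),
    noise_range=None,
)
# kernel is a (21, 21) ndarray that sums to 1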
# Path: basicsr/data/transforms.py
def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):
"""Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).
We use vertical flip and transpose for rotation implementation.
All the images in the list use the same augmentation.
Args:
imgs (list[ndarray] | ndarray): Images to be augmented. If the input
is an ndarray, it will be transformed to a list.
hflip (bool): Horizontal flip. Default: True.
        rotation (bool): Rotation. Default: True.
        flows (list[ndarray]): Flows to be augmented. If the input is an
ndarray, it will be transformed to a list.
Dimension is (h, w, 2). Default: None.
return_status (bool): Return the status of flip and rotation.
Default: False.
Returns:
list[ndarray] | ndarray: Augmented images and flows. If returned
results only have one element, just return ndarray.
"""
hflip = hflip and random.random() < 0.5
vflip = rotation and random.random() < 0.5
rot90 = rotation and random.random() < 0.5
def _augment(img):
if hflip: # horizontal
cv2.flip(img, 1, img)
if vflip: # vertical
cv2.flip(img, 0, img)
if rot90:
img = img.transpose(1, 0, 2)
return img
def _augment_flow(flow):
if hflip: # horizontal
cv2.flip(flow, 1, flow)
flow[:, :, 0] *= -1
if vflip: # vertical
cv2.flip(flow, 0, flow)
flow[:, :, 1] *= -1
if rot90:
flow = flow.transpose(1, 0, 2)
flow = flow[:, :, [1, 0]]
return flow
if not isinstance(imgs, list):
imgs = [imgs]
imgs = [_augment(img) for img in imgs]
if len(imgs) == 1:
imgs = imgs[0]
if flows is not None:
if not isinstance(flows, list):
flows = [flows]
flows = [_augment_flow(flow) for flow in flows]
if len(flows) == 1:
flows = flows[0]
return imgs, flows
else:
if return_status:
return imgs, (hflip, vflip, rot90)
else:
return imgs
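# An illustrative call of augment above (basicsr assumed installed): a single image,
# no flows, with the applied flip/rotation status returned alongside it.
import numpy as np
from basicsr.data.transforms import augment

img = np.random.rand(8, 8, 3).astype(np.float32)
aug_img, status = augment(img, hflip=True, rotation=True, return_status=True)
# aug_img has shape (8, 8, 3); status is the (hflip, vflip, rot90) triple actually applied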
# Path: basicsr/utils/file_client.py
class FileClient(object):
"""A general file client to access files in different backend.
The client loads a file or text in a specified backend from its path
    and returns it as a binary file. It can also register other backend
    accessors with a given name and backend class.
Attributes:
backend (str): The storage backend type. Options are "disk",
"memcached" and "lmdb".
client (:obj:`BaseStorageBackend`): The backend object.
"""
_backends = {
'disk': HardDiskBackend,
'memcached': MemcachedBackend,
'lmdb': LmdbBackend,
}
def __init__(self, backend='disk', **kwargs):
if backend not in self._backends:
raise ValueError(f'Backend {backend} is not supported. Currently supported ones'
f' are {list(self._backends.keys())}')
self.backend = backend
self.client = self._backends[backend](**kwargs)
def get(self, filepath, client_key='default'):
# client_key is used only for lmdb, where different fileclients have
# different lmdb environments.
if self.backend == 'lmdb':
return self.client.get(filepath, client_key)
else:
return self.client.get(filepath)
def get_text(self, filepath):
return self.client.get_text(filepath)
# Path: basicsr/utils/img_util.py
def imfrombytes(content, flag='color', float32=False):
"""Read an image from bytes.
Args:
content (bytes): Image bytes got from files or other streams.
flag (str): Flags specifying the color type of a loaded image,
candidates are `color`, `grayscale` and `unchanged`.
        float32 (bool): Whether to change to float32. If True, will also norm
to [0, 1]. Default: False.
Returns:
ndarray: Loaded image array.
"""
img_np = np.frombuffer(content, np.uint8)
imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED}
img = cv2.imdecode(img_np, imread_flags[flag])
if float32:
img = img.astype(np.float32) / 255.
return img
# Path: basicsr/utils/img_util.py
def img2tensor(imgs, bgr2rgb=True, float32=True):
"""Numpy array to tensor.
Args:
imgs (list[ndarray] | ndarray): Input images.
bgr2rgb (bool): Whether to change bgr to rgb.
float32 (bool): Whether to change to float32.
Returns:
list[tensor] | tensor: Tensor images. If returned results only have
one element, just return tensor.
"""
def _totensor(img, bgr2rgb, float32):
if img.shape[2] == 3 and bgr2rgb:
if img.dtype == 'float64':
img = img.astype('float32')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = torch.from_numpy(img.transpose(2, 0, 1))
if float32:
img = img.float()
return img
if isinstance(imgs, list):
return [_totensor(img, bgr2rgb, float32) for img in imgs]
else:
return _totensor(imgs, bgr2rgb, float32)
# Path: basicsr/utils/logger.py
def get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=None):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added.
Args:
logger_name (str): root logger name. Default: 'basicsr'.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = logging.getLogger(logger_name)
# if the logger has been initialized, just return it
if logger_name in initialized_logger:
return logger
format_str = '%(asctime)s %(levelname)s: %(message)s'
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(format_str))
logger.addHandler(stream_handler)
logger.propagate = False
rank, _ = get_dist_info()
if rank != 0:
logger.setLevel('ERROR')
elif log_file is not None:
logger.setLevel(log_level)
# add file handler
file_handler = logging.FileHandler(log_file, 'w')
file_handler.setFormatter(logging.Formatter(format_str))
file_handler.setLevel(log_level)
logger.addHandler(file_handler)
initialized_logger[logger_name] = True
return logger
# Path: dataloaders/simple_dataset.py
import cv2
import os
import glob
import torch
import random
import numpy as np
import math
from torch.utils.data import Dataset
from torchvision import transforms
from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
from basicsr.data.transforms import augment
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from PIL import Image
class SimpleDataset(Dataset):
def __init__(self, opt, fix_size=512):
self.opt = opt
self.image_root = opt['gt_path']
self.fix_size = fix_size
exts = ['*.jpg', '*.png']
self.image_list = []
for image_root in self.image_root:
for ext in exts:
image_list = glob.glob(os.path.join(image_root, ext))
self.image_list += image_list
                # also include the LSDIR dataset, whose images live in 00* subfolders
image_list = glob.glob(os.path.join(image_root, '00*', ext))
self.image_list += image_list
self.crop_preproc = transforms.Compose([
# transforms.CenterCrop(fix_size),
transforms.Resize(fix_size)
# transforms.RandomHorizontalFlip(),
])
self.img_preproc = transforms.Compose([
transforms.ToTensor(),
])
# blur settings for the first degradation
self.blur_kernel_size = opt['blur_kernel_size']
self.kernel_list = opt['kernel_list']
self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability
self.blur_sigma = opt['blur_sigma']
self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels
self.betap_range = opt['betap_range'] # betap used in plateau blur kernels
self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters
# blur settings for the second degradation
self.blur_kernel_size2 = opt['blur_kernel_size2']
self.kernel_list2 = opt['kernel_list2']
self.kernel_prob2 = opt['kernel_prob2']
self.blur_sigma2 = opt['blur_sigma2']
self.betag_range2 = opt['betag_range2']
self.betap_range2 = opt['betap_range2']
self.sinc_prob2 = opt['sinc_prob2']
# a final sinc filter
self.final_sinc_prob = opt['final_sinc_prob']
self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21
# TODO: kernel range is now hard-coded, should be in the configure file
self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect
self.pulse_tensor[10, 10] = 1
print(f'The dataset length: {len(self.image_list)}')
def __getitem__(self, index):
image = Image.open(self.image_list[index]).convert('RGB')
# width, height = image.size
# if width > height:
# width_after = self.fix_size
# height_after = int(height*width_after/width)
# elif height > width:
# height_after = self.fix_size
# width_after = int(width*height_after/height)
# elif height == width:
# height_after = self.fix_size
# width_after = self.fix_size
image = image.resize((self.fix_size, self.fix_size),Image.LANCZOS)
# image = self.crop_preproc(image)
image = self.img_preproc(image)
# ------------------------ Generate kernels (used in the first degradation) ------------------------ #
kernel_size = random.choice(self.kernel_range)
if np.random.uniform() < self.opt['sinc_prob']:
# this sinc filter setting is for kernels ranging from [7, 21]
if kernel_size < 13:
omega_c = np.random.uniform(np.pi / 3, np.pi)
else:
omega_c = np.random.uniform(np.pi / 5, np.pi)
kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
else:
kernel = random_mixed_kernels(
self.kernel_list,
self.kernel_prob,
kernel_size,
self.blur_sigma,
self.blur_sigma, [-math.pi, math.pi],
self.betag_range,
self.betap_range,
noise_range=None)
# pad kernel
pad_size = (21 - kernel_size) // 2
kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
# ------------------------ Generate kernels (used in the second degradation) ------------------------ #
kernel_size = random.choice(self.kernel_range)
if np.random.uniform() < self.opt['sinc_prob2']:
if kernel_size < 13:
omega_c = np.random.uniform(np.pi / 3, np.pi)
else:
omega_c = np.random.uniform(np.pi / 5, np.pi)
kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
else:
kernel2 = random_mixed_kernels(
self.kernel_list2,
self.kernel_prob2,
kernel_size,
self.blur_sigma2,
| self.blur_sigma2, [-math.pi, math.pi], |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: UnderstandLingBV/LLaMa2lang
# Path: translators/m2m.py
class M2MTranslator(BaseTranslator):
def __init__(self, device, quant4, quant4_config, quant8, max_length, model_size):
super().__init__(device, quant4, quant4_config, quant8, max_length)
self.model_size = model_size
model_name = f'facebook/m2m100_{self.model_size}'
# Load model and tokenizer
if self.quant4:
model = M2M100ForConditionalGeneration.from_pretrained(model_name, device_map=device, quantization_config=self.quant4_config, load_in_4bit=True)
elif self.quant8:
model = M2M100ForConditionalGeneration.from_pretrained(model_name, device_map=self.device, load_in_8bit=True)
else:
model = M2M100ForConditionalGeneration.from_pretrained(model_name).to(self.device)
tokenizer = M2M100Tokenizer.from_pretrained(model_name)
self.model = model
self.tokenizer = tokenizer
def translate(self, texts, source_lang, target_lang):
# Small fix for odd language codes
if source_lang == 'pt-BR':
source_lang = 'pt'
if source_lang == 'uk-UA':
source_lang = 'uk'
with torch.no_grad():
if source_lang == 'eu':
# Not supported by M2M
return None
# Set the source language for the tokenizer
self.tokenizer.src_lang = source_lang
if self.max_length is None:
encoded_batch = self.tokenizer(texts, return_tensors="pt", padding=True).to(self.device)
generated_tokens = self.model.generate(**encoded_batch, forced_bos_token_id=self.tokenizer.get_lang_id(target_lang))
else:
encoded_batch = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True, max_length=self.max_length).to(self.device)
generated_tokens = self.model.generate(**encoded_batch, max_length=self.max_length, forced_bos_token_id=self.tokenizer.get_lang_id(target_lang))
translated_texts = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
return translated_texts
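# A minimal usage sketch for M2MTranslator above (not shown in the repository listing);
# the CPU device, 418M checkpoint size and max_length are assumptions made for illustration.
translator = M2MTranslator(device="cpu", quant4=False, quant4_config=None, quant8=False, max_length=512, model_size="418M")
print(translator.translate(["How are you today?"], "en", "de"))  # prints a list with one German translation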
# Path: translators/madlad.py
class MADLADTranslator(BaseTranslator):
def __init__(self, device, quant4, quant4_config, quant8, max_length, model_size):
super().__init__(device, quant4, quant4_config, quant8, max_length)
self.model_size = model_size
model_name = f'google/madlad400-{self.model_size}-mt'
# Quickly rewrite the model name for the '7b-bt' variant
if self.model_size == '7b-bt':
model_name = f'google/madlad400-{self.model_size}-mt-bt'
# Load model and tokenizer
if self.quant4:
model = T5ForConditionalGeneration.from_pretrained(model_name, device_map=device, quantization_config=self.quant4_config, load_in_4bit=True)
elif self.quant8:
model = T5ForConditionalGeneration.from_pretrained(model_name, device_map=self.device, load_in_8bit=True)
else:
model = T5ForConditionalGeneration.from_pretrained(model_name).to(self.device)
tokenizer = T5Tokenizer.from_pretrained(model_name)
self.model = model
self.tokenizer = tokenizer
def translate(self, texts, source_lang, target_lang):
# Small fix for odd language codes
if source_lang == 'pt-BR':
source_lang = 'pt'
if source_lang == 'uk-UA':
source_lang = 'uk'
with torch.no_grad():
# Preprocess texts and add target language prefix
madlad_texts = [f'<2{target_lang}> ' + text.replace("\n", " ") for text in texts]
if self.max_length is None:
encoded_batch = self.tokenizer(madlad_texts, return_tensors="pt", padding=True).to(self.device)
outputs = self.model.generate(input_ids=encoded_batch['input_ids'], max_new_tokens=2048) # max_new_tokens is required, otherwise generation defaults to only 20 new tokens
else:
encoded_batch = self.tokenizer(madlad_texts, return_tensors="pt", padding=True, truncation=True, max_length=self.max_length).to(self.device)
outputs = self.model.generate(input_ids=encoded_batch['input_ids'], max_new_tokens=self.max_length)
translated_texts = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
return translated_texts
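# Sketch of the input format MADLADTranslator builds above: a "<2xx>" target-language tag is
# prepended to each newline-stripped source text before tokenization (example text is made up).
texts = ["The weather is nice today.\nReally nice."]
target_lang = "de"
madlad_texts = [f'<2{target_lang}> ' + text.replace("\n", " ") for text in texts]
print(madlad_texts)  # ['<2de> The weather is nice today. Really nice.']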
# Path: translators/mbart.py
class mBARTTranslator(BaseTranslator):
language_mapping = {
'ar': 'ar_AR',
'cs': 'cs_CZ',
'de': 'de_DE',
'en': 'en_XX',
'es': 'es_XX',
'et': 'et_EE',
'fi': 'fi_FI',
'fr': 'fr_XX',
'gu': 'gu_IN',
'hi': 'hi_IN',
'it': 'it_IT',
'ja': 'ja_XX',
'kk': 'kk_KZ',
'ko': 'ko_KR',
'lt': 'lt_LT',
'lv': 'lv_LV',
'my': 'my_MM',
'ne': 'ne_NP',
'nl': 'nl_XX',
'ro': 'ro_RO',
'ru': 'ru_RU',
'si': 'si_LK',
'tr': 'tr_TR',
'vi': 'vi_VN',
'zh': 'zh_CN',
'af': 'af_ZA',
'az': 'az_AZ',
'bn': 'bn_IN',
'fa': 'fa_IR',
'he': 'he_IL',
'hr': 'hr_HR',
'id': 'id_ID',
'ka': 'ka_GE',
'km': 'km_KH',
'mk': 'mk_MK',
'ml': 'ml_IN',
'mn': 'mn_MN',
'mr': 'mr_IN',
'pl': 'pl_PL',
'ps': 'ps_AF',
'pt': 'pt_XX',
'pt-BR': 'pt_XX',
'sv': 'sv_SE',
'sw': 'sw_KE',
'ta': 'ta_IN',
'te': 'te_IN',
'th': 'th_TH',
'tl': 'tl_XX',
'uk_UA': 'uk_UA',
'uk': 'uk_UA',
'ur': 'ur_PK',
'xh': 'xh_ZA',
'gl': 'gl_ES',
'sl': 'sl_SI'
}
def __init__(self, device, quant4, quant4_config, quant8, max_length):
super().__init__(device, quant4, quant4_config, quant8, max_length)
model_name = 'facebook/mbart-large-50-many-to-many-mmt'
# Load model and tokenizer
if self.quant4:
model = MBartForConditionalGeneration.from_pretrained(model_name, device_map=device, quantization_config=self.quant4_config, load_in_4bit=True)
elif self.quant8:
model = MBartForConditionalGeneration.from_pretrained(model_name, device_map=self.device, load_in_8bit=True)
else:
model = MBartForConditionalGeneration.from_pretrained(model_name).to(self.device)
tokenizer = MBart50TokenizerFast.from_pretrained(model_name)
self.model = model
self.tokenizer = tokenizer
self.printed_error_langs = {}
def translate(self, texts, source_lang, target_lang):
if source_lang in self.language_mapping:
self.tokenizer.src_lang = self.language_mapping[source_lang]
with torch.no_grad():
if self.max_length is None:
encoded_batch = self.tokenizer(texts, return_tensors="pt", padding=True).to(self.device)
else:
encoded_batch = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True, max_length=self.max_length).to(self.device)
outputs = self.model.generate(**encoded_batch, forced_bos_token_id=self.tokenizer.lang_code_to_id[target_lang])
translated_texts = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
return translated_texts
else:
if not(source_lang in self.printed_error_langs):
print(f"[---- LLaMa2Lang ----] mBART cannot translate from source language {source_lang}, returning originals")
self.printed_error_langs[source_lang] = True
return None
# Path: translators/nllb.py
class NLLBTranslator(BaseTranslator):
language_mapping = {
'en': 'eng_Latn',
'es': 'spa_Latn',
'de': 'deu_Latn',
'ru': 'rus_Cyrl',
'ja': 'jpn_Jpan',
'pt-BR': 'por_Latn',
'ca': 'cat_Latn',
'fr': 'fra_Latn',
'pl': 'pol_Latn',
'vi': 'vie_Latn',
'zh': 'zho_Hant',
'hu': 'hun_Latn',
'ko': 'kor_Hang',
'eu': 'eus_Latn',
'it': 'ita_Latn',
'uk-UA': 'ukr_Cyrl',
'uk': 'ukr_Cyrl',
'id': 'ind_Latn',
'ar': 'arb_Arab',
'fi': 'fin_Latn',
'tr': 'tur_Latn',
'da': 'dan_Latn',
'th': 'tha_Thai',
'sv': 'swe_Latn',
'cs': 'ces_Latn'
}
def __init__(self, device, quant4, quant4_config, quant8, max_length, model_size):
super().__init__(device, quant4, quant4_config, quant8, max_length)
model_name = f'facebook/nllb-200-{model_size}'
# Load model and tokenizer
if self.quant4:
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map=device, quantization_config=self.quant4_config, load_in_4bit=True)
elif self.quant8:
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map=self.device, load_in_8bit=True)
else:
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(self.device)
tokenizer = AutoTokenizer.from_pretrained(model_name)
self.model = model
self.tokenizer = tokenizer
def translate(self, texts, source_lang, target_lang):
self.tokenizer.src_lang = self.language_mapping[source_lang]
with torch.no_grad():
if self.max_length is None:
encoded_batch = self.tokenizer(texts, return_tensors="pt", padding=True).to(self.device)
else:
encoded_batch = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True, max_length=self.max_length).to(self.device)
outputs = self.model.generate(**encoded_batch, forced_bos_token_id=self.tokenizer.lang_code_to_id[target_lang])
translated_texts = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
return translated_texts
# Path: translators/opus.py
class OPUSTranslator(BaseTranslator):
def __init__(self, device, quant4, quant4_config, quant8, max_length):
super().__init__(device, quant4, quant4_config, quant8, max_length)
# Cache for loaded translation models, seemingly faster than letting Huggingface handle it
self.model_cache = {}
# Alternative models that are not created by Helsinki-NLP
self.alternative_models = {
"en-pl": 'gsarti/opus-mt-tc-en-pl',
"en-ja": 'gsarti/opus-mt-tc-base-en-ja'
}
def translate(self, texts, source_lang, target_lang):
with torch.no_grad():
model, tokenizer = self.get_helsinki_nlp_model(source_lang, target_lang)
if model is None or tokenizer is None:
# Try via intermediate language
model_i, tokenizer_i = self.get_helsinki_nlp_model(source_lang, 'en')
model_t, tokenizer_t = self.get_helsinki_nlp_model('en', target_lang)
if model_i is None or tokenizer_i is None or model_t is None or tokenizer_t is None:
return None
# To intermediate language first
if self.max_length is None:
# OPUS crashes if we pass it more than 512 tokens
inputs = tokenizer_i(texts, padding=True, truncation=True, max_length=512, return_tensors="pt").to(self.device)
translated_outputs = model_i.generate(inputs.input_ids)
else:
inputs = tokenizer_i(texts, padding=True, truncation=True, return_tensors="pt", max_length=self.max_length).to(self.device)
translated_outputs = model_i.generate(inputs.input_ids, max_length=self.max_length)
intermediate_texts = [tokenizer_i.decode(output, skip_special_tokens=True) for output in translated_outputs]
# Now to target
if self.max_length is None:
inputs = tokenizer_t(intermediate_texts, padding=True, truncation=True, max_length=512, return_tensors="pt").to(self.device)
translated_outputs = model_t.generate(inputs.input_ids)
else:
inputs = tokenizer_t(intermediate_texts, padding=True, truncation=True, return_tensors="pt", max_length=self.max_length).to(self.device)
translated_outputs = model_t.generate(inputs.input_ids, max_length=self.max_length)
translated_texts = [tokenizer_t.decode(output, skip_special_tokens=True) for output in translated_outputs]
return translated_texts
else:
if self.max_length is None:
inputs = tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt").to(self.device)
translated_outputs = model.generate(inputs.input_ids)
else:
inputs = tokenizer(texts, padding=True, truncation=True, return_tensors="pt", max_length=self.max_length).to(self.device)
translated_outputs = model.generate(inputs.input_ids, max_length=self.max_length)
translated_texts = [tokenizer.decode(output, skip_special_tokens=True) for output in translated_outputs]
return translated_texts
def load_model(self, model_name, model_key):
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Apply quantization if needed
if self.quant4:
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map=self.device, quantization_config=self.quant4_config, load_in_4bit=True)
elif self.quant8:
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map=self.device, load_in_8bit=True)
else:
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(self.device)
self.model_cache[model_key] = (model, tokenizer)
return model, tokenizer
# Tries to obtain a translation model from the Helsinki-NLP group's OPUS models. Returns None, None if no model is found for this language pair
def get_helsinki_nlp_model(self, source_lang, target_lang):
# Small fix for odd language codes
if source_lang == 'pt-BR':
source_lang = 'bzs'
if source_lang == 'uk-UA':
source_lang = 'uk'
model_key = f'{source_lang}-{target_lang}'
if model_key in self.model_cache:
return self.model_cache[model_key]
model_name = f'Helsinki-NLP/opus-mt-{source_lang}-{target_lang}'
try:
return self.load_model(model_name, model_key)
except OSError as e:
# Try to load the tc-big naming convention files
try:
model_name = f'Helsinki-NLP/opus-mt-tc-big-{source_lang}-{target_lang}'
return self.load_model(model_name, model_key)
except OSError as e:
try:
model_name = self.alternative_models[model_key]
return self.load_model(model_name, model_key)
except Exception as e:
print(f"[---- LLaMa2Lang ----] No translation possible from {source_lang} to {target_lang}")
return None, None
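# Sketch of the model-name fallback order implemented in get_helsinki_nlp_model above,
# shown for the en -> pl pair (the last entry comes from the hard-coded alternative_models dict):
source_lang, target_lang = "en", "pl"
candidates = [
f"Helsinki-NLP/opus-mt-{source_lang}-{target_lang}",  # tried first
f"Helsinki-NLP/opus-mt-tc-big-{source_lang}-{target_lang}",  # tried on OSError
"gsarti/opus-mt-tc-en-pl",  # final fallback for this pair
]
print(candidates)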
# Path: translate.py
import os
import torch
import json
import re
import gc
import argparse
from datasets import load_dataset
from transformers import BitsAndBytesConfig
from tqdm import tqdm
from translators.m2m import M2MTranslator
from translators.madlad import MADLADTranslator
from translators.mbart import mBARTTranslator
from translators.nllb import NLLBTranslator
from translators.opus import OPUSTranslator
# Find the max checkpoint number to continue from
def find_largest_checkpoint(checkpoint_location):
pattern = r'upto_(\d+).json'
files = os.listdir(checkpoint_location)
| numbers = [int(re.search(pattern, file).group(1)) for file in files if re.match(pattern, file)] |
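# Quick illustration of the checkpoint filename pattern used above; the filename here is hypothetical.
import re
match = re.search(r'upto_(\d+).json', 'upto_1500.json')
print(int(match.group(1)))  # 1500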
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: gordicaleksa/serbian-llm-eval
# Path: lm_eval/utils.py
class ExitCodeError(Exception):
class MultiChoice:
class Reorderer:
def sh(x):
def escaped_split(text, sep_char, maxsplit=-1):
def simple_parse_args_string(args_string):
def join_iters(iters):
def chunks(iter, n=0, fn=None):
def group(arr, fn):
def _is_json_task(task_name):
def __init__(self, choices):
def __contains__(self, values):
def __iter__(self):
def pattern_match(patterns, source_list):
def general_detokenize(string):
def get_rolling_token_windows(token_list, prefix_token, max_seq_len, context_len):
def make_disjoint_window(pair):
def select_continuation_from_batch_left_padding(
generations: Union[List[List[int]], torch.Tensor], max_context_size: int
):
def __init__(self, arr, fn):
def get_reordered(self):
def get_original(self, newarr):
def positional_deprecated(fn):
def _wrapper(*args, **kwargs):
def find_test_root(start_path: pathlib.Path) -> pathlib.Path:
def run_task_tests(task_list: List[str]):
def clear_torch_cache():
# Path: lm_eval/base.py
class BaseLM(LM):
def __init__(self):
super().__init__()
self.batch_schedule = 1
self.batch_sizes = {}
self.max_batch_size = 512
@property
@abstractmethod
def eot_token_id(self):
pass
@property
@abstractmethod
def max_length(self):
pass
@property
@abstractmethod
def max_gen_toks(self):
pass
@property
@abstractmethod
def batch_size(self):
pass
@property
@abstractmethod
def device(self):
pass
@abstractmethod
def tok_encode(self, string: str):
pass
@abstractmethod
def tok_decode(self, tokens: Iterable[int]):
pass
@abstractmethod
def _model_generate(self, context, max_length, eos_token_id):
pass
@abstractmethod
def _model_call(self, inps):
"""
inps: a torch tensor of shape [batch, sequence]
the size of sequence may vary from call to call
returns: a torch tensor of shape [batch, sequence, vocab] with the
logits returned from the model
"""
pass
def _detect_batch_size(self, requests=None, pos=0):
if requests:
_, context_enc, continuation_enc = requests[pos]
max_length = len(
(context_enc + continuation_enc)[-(self.max_length + 1) :][:-1]
)
else:
max_length = self.max_length
# if OOM, then halves batch_size and tries again
@find_executable_batch_size(starting_batch_size=self.max_batch_size)
def forward_batch(batch_size):
test_batch = torch.ones((batch_size, max_length), device=self.device).long()
for _ in range(5):
_ = F.log_softmax(self._model_call(test_batch), dim=-1).cpu()
return batch_size
batch_size = forward_batch()
utils.clear_torch_cache()
return batch_size
# subclass must implement properties vocab_size, eot_token_id, max_gen_toks, batch_size, device, max_length.
# TODO: enforce this somehow
def _encode_pair(self, context, continuation):
n_spaces = len(context) - len(context.rstrip())
if n_spaces > 0:
continuation = context[-n_spaces:] + continuation
context = context[:-n_spaces]
whole_enc = self.tok_encode(context + continuation)
context_enc = self.tok_encode(context)
context_enc_len = len(context_enc)
continuation_enc = whole_enc[context_enc_len:]
return context_enc, continuation_enc
def loglikelihood(self, requests):
new_reqs = []
for context, continuation in requests:
if context == "":
# end of text as context
context_enc, continuation_enc = [self.eot_token_id], self.tok_encode(
continuation
)
else:
context_enc, continuation_enc = self._encode_pair(context, continuation)
new_reqs.append(((context, continuation), context_enc, continuation_enc))
return self._loglikelihood_tokens(new_reqs)
def loglikelihood_rolling(self, requests):
# TODO: Implement caching once we've confirmed the perplexity implementation
# automatic batch size detection for vectorization
adaptive_batch_size = None
if self.batch_size == "auto":
# using rolling window with maximum context
print("Passed argument batch_size = auto. Detecting largest batch size")
batch_size = self._detect_batch_size()
print(f"Determined Largest batch size: {batch_size}")
adaptive_batch_size = batch_size
loglikelihoods = []
for (string,) in tqdm(requests):
rolling_token_windows = list(
map(
utils.make_disjoint_window,
utils.get_rolling_token_windows(
token_list=self.tok_encode(string),
prefix_token=self.eot_token_id,
max_seq_len=self.max_length,
context_len=1,
),
)
)
rolling_token_windows = [(None,) + x for x in rolling_token_windows]
# TODO: extract out this call so it only gets called once and also somehow figure out partial caching for
# that
string_nll = self._loglikelihood_tokens(
rolling_token_windows,
disable_tqdm=True,
override_bs=adaptive_batch_size,
)
# discard is_greedy
string_nll = [x[0] for x in string_nll]
string_nll = sum(string_nll)
loglikelihoods.append(string_nll)
return loglikelihoods
def _loglikelihood_tokens(self, requests, disable_tqdm=False, override_bs=None):
# TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
res = []
def _collate(x):
# the negative sign on len(toks) sorts descending - this has a few advantages:
# - time estimates will always be over not underestimates, which is more useful for planning
# - to know the size of a batch when going through the list, you know the first one is always the batch
# padded context length. this is useful to simplify the batching logic and more importantly to make
# automatic adaptive batches much much easier to implement
# - any OOMs will happen right away rather than near the end
toks = x[1] + x[2]
return -len(toks), tuple(toks)
re_ord = utils.Reorderer(requests, _collate)
reordered_requests = re_ord.get_reordered()
n_reordered_requests = len(reordered_requests)
# automatic (variable) batch size detection for vectorization
# pull longest context sample from request
def _batch_scheduler(pos):
sched = pos // int(n_reordered_requests / self.batch_schedule)
if sched in self.batch_sizes:
return self.batch_sizes[sched]
print(
f"Passed argument batch_size = auto:{self.batch_schedule}. Detecting largest batch size"
)
self.batch_sizes[sched] = self._detect_batch_size(reordered_requests, pos)
print(f"Determined largest batch size: {self.batch_sizes[sched]}")
return self.batch_sizes[sched]
for chunk in utils.chunks(
tqdm(reordered_requests, disable=disable_tqdm),
n=self.batch_size
if self.batch_size != "auto"
else override_bs
if override_bs is not None
else 0,
fn=_batch_scheduler
if self.batch_size == "auto"
and n_reordered_requests > 0
and not override_bs
else None,
):
inps = []
cont_toks_list = []
inplens = []
padding_length = None
# because vectorizing is annoying, we first convert each (context, continuation) pair to padded
# tensors, then we pack them together into a batch, call the model, and then pick it all apart
# again because vectorizing is annoying
for _, context_enc, continuation_enc in chunk:
# sanity check
assert len(context_enc) > 0
assert len(continuation_enc) > 0
assert len(continuation_enc) <= self.max_length
# how this all works:
# CTX CONT
# inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]
# gpt2 \ \
# logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the
# cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice
# when too long to fit in context, truncate from the left
inp = torch.tensor(
(context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
dtype=torch.long,
).to(self.device)
(inplen,) = inp.shape
cont = continuation_enc
# since in _collate we make sure length is descending, the longest is always the first one.
padding_length = (
padding_length if padding_length is not None else inplen
)
# pad length from seq to padding_length
inp = torch.cat(
[
inp, # [seq]
torch.zeros(padding_length - inplen, dtype=torch.long).to(
inp.device
), # [padding_length - seq]
],
dim=0,
)
inps.append(inp.unsqueeze(0)) # [1, padding_length]
cont_toks_list.append(cont)
inplens.append(inplen)
batched_inps = torch.cat(inps, dim=0) # [batch, padding_length]
multi_logits = F.log_softmax(
self._model_call(batched_inps), dim=-1
).cpu() # [batch, padding_length, vocab]
for (cache_key, _, _), logits, inp, inplen, cont_toks in zip(
chunk, multi_logits, inps, inplens, cont_toks_list
):
# Slice to original seq length
contlen = len(cont_toks)
inplen = inplen + (
logits.shape[0] - padding_length
) # if "virtual tokens" (from prompt tuning) are added, inplen is larger
logits = logits[inplen - contlen : inplen].unsqueeze(
0
) # [1, seq, vocab]
# Check if per-token argmax is exactly equal to continuation
greedy_tokens = logits.argmax(dim=-1)
cont_toks = torch.tensor(cont_toks, dtype=torch.long).unsqueeze(
0
) # [1, seq]
max_equal = (greedy_tokens == cont_toks).all()
# Obtain log-probs at the corresponding continuation token indices
# last_token_slice = logits[:, -1, :].squeeze(0).tolist()
logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
-1
) # [1, seq]
# Answer: (log prob, is-exact-match)
answer = (float(logits.sum()), bool(max_equal))
# partial caching
if cache_key is not None:
self.cache_hook.add_partial("loglikelihood", cache_key, answer)
res.append(answer)
return re_ord.get_original(res)
def greedy_until(self, requests):
# TODO: implement fully general `until` that handles until that are
# multiple tokens or that span multiple tokens correctly
# TODO: extract to TokenizedLM?
res = []
def _collate(x):
# the negative sign on len(toks) sorts descending - this has a few advantages:
# - time estimates will always be over not underestimates, which is more useful for planning
# - to know the size of a batch when going through the list, you know the first one is always the batch
# padded context length. this is useful to simplify the batching logic and more importantly to make
# automatic adaptive batches much much easier to implement
# - any OOMs will happen right away rather than near the end
toks = self.tok_encode(x[0])
return -len(toks), x[0]
re_ord = utils.Reorderer(requests, _collate)
warn_stop_seq = False
for context, request_args in tqdm(re_ord.get_reordered()):
until = request_args["until"]
if isinstance(until, str):
until = [until]
if until:
try:
(primary_until,) = self.tok_encode(until[0])
except ValueError:
if not warn_stop_seq:
print(
"Warning: a primary stop sequence is multi-token! Will default to EOS token for this tokenizer. Consider using `hf-causal-experimental` for multi-token stop sequence support for the time being."
)
warn_stop_seq = True
primary_until = self.eot_token_id
else:
primary_until = None
context_enc = torch.tensor(
[self.tok_encode(context)[self.max_gen_toks - self.max_length :]]
).to(self.device)
max_gen_tokens = min(
self.max_gen_toks, request_args.get("max_length", self.max_gen_toks)
)
cont = self._model_generate(
context_enc, context_enc.shape[1] + max_gen_tokens, primary_until
)
s = self.tok_decode(cont[0].tolist()[context_enc.shape[1] :])
for term in until:
s = s.split(term)[0]
# partial caching
self.cache_hook.add_partial("greedy_until", (context, until), s)
res.append(s)
return re_ord.get_original(res)
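# Worked example of the continuation-logit slicing in _loglikelihood_tokens above, under assumed
# lengths: 4 context tokens and 6 continuation tokens give inp = (ctx + cont)[:-1] of length 9,
# and rows [inplen - contlen : inplen] of the logits are exactly the positions scoring the continuation.
inplen, contlen = 9, 6
print(list(range(inplen - contlen, inplen)))  # [3, 4, 5, 6, 7, 8]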
# Path: lm_eval/models/deepsparse.py
from typing import List, Optional, Tuple, Union
from tqdm import tqdm
from lm_eval import utils
from lm_eval.base import BaseLM
import random
import numpy
import torch
import deepsparse
class DeepSparseLM(BaseLM):
# Default max sequence length setting for when no `max_length` is provided
_DEFAULT_MAX_LENGTH = 2048
def __init__(
self,
pretrained: str,
tokenizer: Optional[str] = None,
batch_size: Optional[Union[int, str]] = 1,
max_gen_toks: Optional[int] = 256,
| max_length: Optional[int] = None, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mu-cai/ViP-LLaVA
# Path: llava/constants.py
IMAGE_TOKEN_INDEX = -200
# Path: llava/constants.py
DEFAULT_IMAGE_TOKEN = "<image>"
# Path: llava/constants.py
DEFAULT_IM_START_TOKEN = "<im_start>"
# Path: llava/constants.py
DEFAULT_IM_END_TOKEN = "<im_end>"
# Path: llava/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
TWO = auto()
MPT = auto()
PLAIN = auto()
LLAMA_2 = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_images(self, return_pil=False):
def expand2square(pil_img, background_color=(122, 116, 104)):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: llava/model/builder.py
def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda", **kwargs):
kwargs = {"device_map": device_map, **kwargs}
if device != "cuda":
kwargs['device_map'] = {"": device}
if load_8bit:
kwargs['load_in_8bit'] = True
elif load_4bit:
kwargs['load_in_4bit'] = True
kwargs['quantization_config'] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type='nf4'
)
else:
kwargs['torch_dtype'] = torch.float16
if 'llava' in model_name.lower():
# Load LLaVA model
if 'lora' in model_name.lower() and model_base is None:
warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')
if 'lora' in model_name.lower() and model_base is not None:
lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
print('Loading LLaVA from base model...')
model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features
if model.lm_head.weight.shape[0] != token_num:
model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))
model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))
print('Loading additional LLaVA weights...')
if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
else:
# this is probably from HF Hub
from huggingface_hub import hf_hub_download
def load_from_hf(repo_id, filename, subfolder=None):
cache_file = hf_hub_download(
repo_id=repo_id,
filename=filename,
subfolder=subfolder)
return torch.load(cache_file, map_location='cpu')
non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
if any(k.startswith('model.model.') for k in non_lora_trainables):
non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
model.load_state_dict(non_lora_trainables, strict=False)
from peft import PeftModel
print('Loading LoRA weights...')
model = PeftModel.from_pretrained(model, model_path)
print('Merging LoRA weights...')
model = model.merge_and_unload()
print('Model is loaded...')
elif model_base is not None:
# this may be mm projector only
print('Loading LLaVA from base model...')
if 'mpt' in model_name.lower():
if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):
shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
cfg_pretrained = AutoConfig.from_pretrained(model_path)
model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
model.load_state_dict(mm_projector_weights, strict=False)
else:
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
else:
# Load language model
if model_base is not None:
# PEFT model
from peft import PeftModel
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, **kwargs)
print(f"Loading LoRA weights from {model_path}")
model = PeftModel.from_pretrained(model, model_path)
print(f"Merging weights")
model = model.merge_and_unload()
print('Convert to FP16...')
model.to(torch.float16)
else:
use_fast = False
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
image_processor = None
if 'llava' in model_name.lower():
mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
if mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
if mm_use_im_start_end:
tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
model.resize_token_embeddings(len(tokenizer))
vision_tower = model.get_vision_tower()
if not vision_tower.is_loaded:
vision_tower.load_model()
vision_tower.to(device=device, dtype=torch.float16)
image_processor = vision_tower.image_processor
if hasattr(model.config, "max_sequence_length"):
context_len = model.config.max_sequence_length
else:
context_len = 2048
return tokenizer, model, image_processor, context_len
# Path: llava/utils.py
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
import torch
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
# Path: llava/mm_utils.py
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
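# Small sketch of the insert_separator logic above with toy token ids; -200 is IMAGE_TOKEN_INDEX.
chunks = [[1, 2], [3], [4, 5]]
sep = [-200]
interleaved = [ele for sublist in zip(chunks, [sep] * len(chunks)) for ele in sublist][:-1]
print(interleaved)  # [[1, 2], [-200], [3], [-200], [4, 5]]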
# Path: llava/mm_utils.py
def get_model_name_from_path(model_path):
model_path = model_path.strip("/")
model_paths = model_path.split("/")
if model_paths[-1].startswith('checkpoint-'):
return model_paths[-2] + "_" + model_paths[-1]
else:
return model_paths[-1]
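# Behaviour sketch for get_model_name_from_path above, using made-up paths:
print(get_model_name_from_path("/ckpts/llava-v1.5-7b/checkpoint-100"))  # llava-v1.5-7b_checkpoint-100
print(get_model_name_from_path("/ckpts/llava-v1.5-7b/"))  # llava-v1.5-7b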
# Path: llava/mm_utils.py
class KeywordsStoppingCriteria(StoppingCriteria):
def __init__(self, keywords, tokenizer, input_ids):
self.keywords = keywords
self.keyword_ids = []
self.max_keyword_len = 0
for keyword in keywords:
cur_keyword_ids = tokenizer(keyword).input_ids
if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
cur_keyword_ids = cur_keyword_ids[1:]
if len(cur_keyword_ids) > self.max_keyword_len:
self.max_keyword_len = len(cur_keyword_ids)
self.keyword_ids.append(torch.tensor(cur_keyword_ids))
self.tokenizer = tokenizer
self.start_len = input_ids.shape[1]
def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)
self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
for keyword_id in self.keyword_ids:
if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():
return True
outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
for keyword in self.keywords:
if keyword in outputs:
return True
return False
def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
outputs = []
for i in range(output_ids.shape[0]):
outputs.append(self.call_for_batch(output_ids[i].unsqueeze(0), scores))
return all(outputs)
# Path: llava/eval/model_vqa_qbench.py
import argparse
import torch
import json
import requests
from tqdm import tqdm
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from PIL import Image
from PIL import Image
from io import BytesIO
def load_image(image_file):
if image_file.startswith('http') or image_file.startswith('https'):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(image_file).convert('RGB')
return image
def eval_model(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, True)
with open(args.questions_file) as f:
llvqa_data = json.load(f)
for i, llddata in enumerate(tqdm(llvqa_data)):
filename = llddata["img_path"]
if args.lang == "en":
message = llddata["question"] + "\nChoose between one of the options as follows:\n"
elif args.lang == "zh":
message = llddata["question"] + "\在下列选项中选择一个:\n"
else:
raise NotImplementedError("Q-Bench does not support languages other than English (en) and Chinese (zh) yet. Contact us (https://github.com/VQAssessment/Q-Bench/) to convert Q-Bench into more languages.")
for choice, ans in zip(["A.", "B.", "C.", "D."], llddata["candidates"]):
message += f"{choice} {ans}\n"
qs = message
if model.config.mm_use_im_start_end:
qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
else:
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
if 'llama-2' in model_name.lower():
conv_mode = "llava_llama_2"
elif "v1" in model_name.lower():
conv_mode = "llava_v1"
elif "mpt" in model_name.lower():
conv_mode = "mpt"
else:
conv_mode = "llava_v0"
if args.conv_mode is not None and conv_mode != args.conv_mode:
print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
else:
args.conv_mode = conv_mode
conv = conv_templates[args.conv_mode].copy()
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
image = load_image(args.image_folder + filename)
image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().cuda()
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
keywords = [stop_str]
stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
with torch.inference_mode():
output_ids = model.generate(
input_ids,
images=image_tensor,
num_beams=1,
do_sample=False,
| temperature=0, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Open3DA/LL3DA
# Path: utils/box_util.py
def box3d_iou_batch_tensor(corners1, corners2):
''' Compute 3D bounding box IoU.
Note: only for axis-aligned bounding boxes
Input:
corners1: PyTorch tensor (N,8,3), assume up direction is Z (batch of N samples)
corners2: PyTorch tensor (N,8,3), assume up direction is Z (batch of N samples)
Output:
iou: an tensor of 3D bounding box IoU (N)
'''
x_min_1, x_max_1, y_min_1, y_max_1, z_min_1, z_max_1 = get_box3d_min_max_batch_tensor(corners1)
x_min_2, x_max_2, y_min_2, y_max_2, z_min_2, z_max_2 = get_box3d_min_max_batch_tensor(corners2)
xA = torch.max(x_min_1, x_min_2)
yA = torch.max(y_min_1, y_min_2)
zA = torch.max(z_min_1, z_min_2)
xB = torch.min(x_max_1, x_max_2)
yB = torch.min(y_max_1, y_max_2)
zB = torch.min(z_max_1, z_max_2)
zeros = corners1.new_zeros(xA.shape).cuda()
inter_vol = torch.max((xB - xA), zeros) * torch.max((yB - yA), zeros) * torch.max((zB - zA), zeros)
box_vol_1 = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) * (z_max_1 - z_min_1)
box_vol_2 = (x_max_2 - x_min_2) * (y_max_2 - y_min_2) * (z_max_2 - z_min_2)
iou = inter_vol / (box_vol_1 + box_vol_2 - inter_vol + 1e-8)
return iou
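# Hand-worked axis-aligned IoU mirroring the min/max logic above (CPU-only sketch with made-up boxes):
# box1 spans [0, 1]^3 and box2 spans [0.5, 1.5] x [0, 1] x [0, 1].
import torch
min1, max1 = torch.tensor([0., 0., 0.]), torch.tensor([1., 1., 1.])
min2, max2 = torch.tensor([0.5, 0., 0.]), torch.tensor([1.5, 1., 1.])
inter = torch.clamp(torch.min(max1, max2) - torch.max(min1, min2), min=0).prod()
vol1, vol2 = (max1 - min1).prod(), (max2 - min2).prod()
print(inter / (vol1 + vol2 - inter))  # tensor(0.3333): intersection 0.5 over union 1.5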
# Path: utils/ap_calculator.py
class APCalculator(object):
"""Calculating Average Precision"""
def __init__(
self,
dataset_config,
ap_iou_thresh=[0.25, 0.5],
class2type_map=None,
exact_eval=True,
ap_config_dict=None,
):
"""
Args:
ap_iou_thresh: List of float between 0 and 1.0
IoU threshold to judge whether a prediction is positive.
class2type_map: [optional] dict {class_int:class_name}
"""
self.ap_iou_thresh = ap_iou_thresh
if ap_config_dict is None:
ap_config_dict = get_ap_config_dict(
dataset_config=dataset_config, remove_empty_box=exact_eval
)
self.ap_config_dict = ap_config_dict
self.class2type_map = class2type_map
self.reset()
def make_gt_list(self, gt_box_corners, gt_box_sem_cls_labels, gt_box_present):
batch_gt_map_cls = []
bsize = gt_box_corners.shape[0]
for i in range(bsize):
batch_gt_map_cls.append(
[
(gt_box_sem_cls_labels[i, j].item(), gt_box_corners[i, j])
for j in range(gt_box_corners.shape[1])
if gt_box_present[i, j] == 1
]
)
return batch_gt_map_cls
def step_meter(self, outputs, targets):
if "outputs" in outputs:
outputs = outputs["outputs"]
self.step(
predicted_box_corners=outputs["box_corners"],
sem_cls_probs=outputs["sem_cls_prob"],
objectness_probs=outputs["objectness_prob"],
point_cloud=targets["point_clouds"],
gt_box_corners=targets["gt_box_corners"],
gt_box_sem_cls_labels=targets["gt_box_sem_cls_label"],
gt_box_present=targets["gt_box_present"],
)
def step(
self,
predicted_box_corners,
sem_cls_probs,
objectness_probs,
point_cloud,
gt_box_corners,
gt_box_sem_cls_labels,
gt_box_present,
):
"""
Perform NMS on predicted boxes and threshold them according to score.
Convert GT boxes
"""
gt_box_corners = gt_box_corners.cpu().detach().numpy()
gt_box_sem_cls_labels = gt_box_sem_cls_labels.cpu().detach().numpy()
gt_box_present = gt_box_present.cpu().detach().numpy()
batch_gt_map_cls = self.make_gt_list(
gt_box_corners, gt_box_sem_cls_labels, gt_box_present
)
batch_pred_map_cls = parse_predictions(
predicted_box_corners,
sem_cls_probs,
objectness_probs,
point_cloud,
self.ap_config_dict,
)
self.accumulate(batch_pred_map_cls, batch_gt_map_cls)
def accumulate(self, batch_pred_map_cls, batch_gt_map_cls):
"""Accumulate one batch of prediction and groundtruth.
Args:
batch_pred_map_cls: a list of lists [[(pred_cls, pred_box_params, score),...],...]
batch_gt_map_cls: a list of lists [[(gt_cls, gt_box_params),...],...]
should have the same length with batch_pred_map_cls (batch_size)
"""
bsize = len(batch_pred_map_cls)
assert bsize == len(batch_gt_map_cls)
for i in range(bsize):
self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i]
self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i]
self.scan_cnt += 1
def compute_metrics(self):
"""Use accumulated predictions and groundtruths to compute Average Precision."""
overall_ret = OrderedDict()
for ap_iou_thresh in self.ap_iou_thresh:
ret_dict = OrderedDict()
rec, prec, ap = eval_det_multiprocessing(
self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh
)
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
ret_dict["%s Average Precision" % (clsname)] = ap[key]
ap_vals = np.array(list(ap.values()), dtype=np.float32)
ap_vals[np.isnan(ap_vals)] = 0
ret_dict["mAP"] = ap_vals.mean()
rec_list = []
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
try:
ret_dict["%s Recall" % (clsname)] = rec[key][-1]
rec_list.append(rec[key][-1])
except:
ret_dict["%s Recall" % (clsname)] = 0
rec_list.append(0)
ret_dict["AR"] = np.mean(rec_list)
overall_ret[ap_iou_thresh] = ret_dict
return overall_ret
def __str__(self):
overall_ret = self.compute_metrics()
return self.metrics_to_str(overall_ret)
def metrics_to_str(self, overall_ret, per_class=True):
mAP_strs = []
AR_strs = []
per_class_metrics = []
for ap_iou_thresh in self.ap_iou_thresh:
mAP = overall_ret[ap_iou_thresh]["mAP"] * 100
mAP_strs.append(f"{mAP:.2f}")
ar = overall_ret[ap_iou_thresh]["AR"] * 100
AR_strs.append(f"{ar:.2f}")
if per_class:
# per-class metrics
per_class_metrics.append("-" * 5)
per_class_metrics.append(f"IOU Thresh={ap_iou_thresh}")
for x in list(overall_ret[ap_iou_thresh].keys()):
if x == "mAP" or x == "AR":
pass
else:
met_str = f"{x}: {overall_ret[ap_iou_thresh][x]*100:.2f}"
per_class_metrics.append(met_str)
ap_header = [f"mAP{x:.2f}" for x in self.ap_iou_thresh]
ap_str = ", ".join(ap_header)
ap_str += ": " + ", ".join(mAP_strs)
ap_str += "\n"
ar_header = [f"AR{x:.2f}" for x in self.ap_iou_thresh]
ap_str += ", ".join(ar_header)
ap_str += ": " + ", ".join(AR_strs)
if per_class:
per_class_metrics = "\n".join(per_class_metrics)
ap_str += "\n"
ap_str += per_class_metrics
return ap_str
def metrics_to_dict(self, overall_ret):
metrics_dict = {}
for ap_iou_thresh in self.ap_iou_thresh:
metrics_dict[f"mAP_{ap_iou_thresh}"] = (
overall_ret[ap_iou_thresh]["mAP"] * 100
)
metrics_dict[f"AR_{ap_iou_thresh}"] = overall_ret[ap_iou_thresh]["AR"] * 100
return metrics_dict
def reset(self):
self.gt_map_cls = {} # {scan_id: [(classname, bbox)]}
self.pred_map_cls = {} # {scan_id: [(classname, bbox, score)]}
self.scan_cnt = 0
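# Typical evaluation flow for APCalculator above (a sketch; the config and loader names are
# whatever the surrounding training code provides):
#   calc = APCalculator(dataset_config, ap_iou_thresh=[0.25, 0.5], class2type_map=...)
#   for outputs, targets in eval_batches: calc.step_meter(outputs, targets)
#   overall = calc.compute_metrics()        # OrderedDict keyed by IoU threshold
#   print(calc.metrics_to_str(overall))     # or calc.metrics_to_dict(overall) for logging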
# Path: utils/io.py
def save_checkpoint(
checkpoint_dir,
model_no_ddp,
optimizer,
epoch,
args,
best_val_metrics,
filename=None,
):
if not is_primary():
return
if filename is None:
filename = f"checkpoint_{epoch:04d}.pth"
checkpoint_name = os.path.join(checkpoint_dir, filename)
weight_ckpt = model_no_ddp.state_dict()
parameter_names = list(weight_ckpt.keys())
for name in parameter_names:
if args.filter_name is not None and args.filter_name in name:
weight_ckpt.pop(name)
sd = {
"model": weight_ckpt,
"optimizer": optimizer.state_dict(),
"epoch": epoch,
"args": args,
"best_val_metrics": best_val_metrics,
}
torch.save(sd, checkpoint_name)
# Path: utils/misc.py
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_distributed():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
barrier()
all_reduce_sum(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
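# Minimal sketch of SmoothedValue above: a window of 3 and four updates.
m = SmoothedValue(window_size=3)
m.update(1.0)
m.update(2.0)
m.update(3.0)
m.update(4.0)
print(m.value, m.avg, m.global_avg)  # 4.0 3.0 2.5  (last value, mean of last 3, mean of all 4)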
# Path: utils/proposal_parser.py
def parse_predictions(
predicted_boxes, sem_cls_probs, objectness_probs, point_cloud,
config_dict=get_ap_config_dict()
):
"""Parse predictions to OBB parameters and suppress overlapping boxes
Args:
end_points: dict
{point_clouds, center, heading_scores, heading_residuals,
size_scores, size_residuals, sem_cls_scores}
config_dict: dict
{dataset_config, remove_empty_box, use_3d_nms, nms_iou,
use_old_type_nms, conf_thresh, per_class_proposal}
Returns:
batch_pred_map_cls: a list of len == batch size (BS)
[pred_list_i], i = 0, 1, ..., BS-1
where pred_list_i = [(pred_sem_cls, box_params, box_score)_j]
where j = 0, ..., num of valid detections - 1 from sample input i
"""
sem_cls_probs = sem_cls_probs.detach().cpu().numpy() # B,num_proposal,10
pred_sem_cls_prob = np.max(sem_cls_probs, -1) # B,num_proposal
pred_sem_cls = np.argmax(sem_cls_probs, -1)
obj_prob = objectness_probs.detach().cpu().numpy()
pred_corners_3d_upright_camera = predicted_boxes.detach().cpu().numpy()
K = pred_corners_3d_upright_camera.shape[1] # K==num_proposal
bsize = pred_corners_3d_upright_camera.shape[0]
nonempty_box_mask = np.ones((bsize, K))
if config_dict["remove_empty_box"]:
# -------------------------------------
# Remove predicted boxes without any point within them.
batch_pc = point_cloud.cpu().numpy()[:, :, 0:3] # B,N,3
for i in range(bsize):
pc = batch_pc[i, :, :] # (N,3)
for j in range(K):
box3d = pred_corners_3d_upright_camera[i, j, :, :] # (8,3)
box3d = flip_axis_to_depth(box3d)
# pc_in_box, inds = extract_pc_in_box3d(pc, box3d)
pc_in_box, inds = ez_extract_pc_in_box3d(pc, box3d)
if len(pc_in_box) < 5:
nonempty_box_mask[i, j] = 0
if nonempty_box_mask[i].sum() == 0:
nonempty_box_mask[i, obj_prob[i].argmax()] = 1
# -------------------------------------
if "no_nms" in config_dict and config_dict["no_nms"]:
# pred_mask = np.ones((bsize, K))
pred_mask = nonempty_box_mask
elif not config_dict["use_3d_nms"]:
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_2d_with_prob = np.zeros((K, 5))
for j in range(K):
boxes_2d_with_prob[j, 0] = np.min(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_2d_with_prob[j, 2] = np.max(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_2d_with_prob[j, 1] = np.min(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_2d_with_prob[j, 3] = np.max(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_2d_with_prob[j, 4] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
assert len(nonempty_box_inds) > 0
pick = nms_2d_faster(
boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict["nms_iou"],
config_dict["use_old_type_nms"],
)
assert len(pick) > 0
pred_mask[i, nonempty_box_inds[pick]] = 1
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict["use_3d_nms"] and (not config_dict["cls_nms"]):
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 7))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 1] = np.min(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 2] = np.min(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 3] = np.max(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 4] = np.max(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 5] = np.max(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
assert len(nonempty_box_inds) > 0
pick = nms_3d_faster(
boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict["nms_iou"],
config_dict["use_old_type_nms"],
)
assert len(pick) > 0
pred_mask[i, nonempty_box_inds[pick]] = 1
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict["use_3d_nms"] and config_dict["cls_nms"]:
# ---------- NMS input: pred_with_prob in (B,K,8) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 8))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 1] = np.min(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 2] = np.min(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 3] = np.max(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 4] = np.max(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 5] = np.max(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
boxes_3d_with_prob[j, 7] = pred_sem_cls[
i, j
] # only suppress if the two boxes are of the same class!!
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
assert len(nonempty_box_inds) > 0
pick = nms_3d_faster_samecls(
boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict["nms_iou"],
config_dict["use_old_type_nms"],
)
assert len(pick) > 0
pred_mask[i, nonempty_box_inds[pick]] = 1
# ---------- NMS output: pred_mask in (B,K) -----------
return pred_mask
# Path: utils/dist.py
def init_distributed(gpu_id, global_rank, world_size, dist_url, dist_backend):
torch.cuda.set_device(gpu_id)
print(
f"| distributed init (rank {global_rank}) (world {world_size}): {dist_url}",
flush=True,
)
torch.distributed.init_process_group(
backend=dist_backend,
init_method=dist_url,
world_size=world_size,
rank=global_rank,
)
torch.distributed.barrier()
setup_print_for_distributed(is_primary())
# Path: utils/dist.py
def is_distributed():
if not dist.is_available() or not dist.is_initialized():
return False
return True
# Path: utils/dist.py
def is_primary():
return get_rank() == 0
# Path: utils/dist.py
def get_rank():
if not is_distributed():
return 0
return dist.get_rank()
# Path: utils/dist.py
def barrier():
if not is_distributed():
return
torch.distributed.barrier()
# Path: utils/dist.py
def all_reduce_average(tensor):
val = all_reduce_sum(tensor)
return val / get_world_size()
# Path: utils/dist.py
def all_gather_dict(data):
"""
Run all_gather on data which is a dictionary of Tensors
"""
assert isinstance(data, dict)
gathered_dict = {}
for item_key in data:
if isinstance(data[item_key], torch.Tensor):
if is_distributed():
data[item_key] = data[item_key].contiguous()
tensor_list = [torch.empty_like(data[item_key]) for _ in range(get_world_size())]
dist.all_gather(tensor_list, data[item_key])
gathered_tensor = torch.cat(tensor_list, dim=0)
else:
gathered_tensor = data[item_key]
gathered_dict[item_key] = gathered_tensor
return gathered_dict
# Path: engine.py
import os, sys, time, math, json, importlib
import torch
import datetime
import utils.capeval.bleu.bleu as capblue
import utils.capeval.cider.cider as capcider
import utils.capeval.rouge.rouge as caprouge
import utils.capeval.meteor.meteor as capmeteor
from collections import defaultdict, OrderedDict
from utils.box_util import box3d_iou_batch_tensor
from utils.ap_calculator import APCalculator
from utils.io import save_checkpoint
from utils.misc import SmoothedValue
from utils.proposal_parser import parse_predictions
from utils.dist import (
init_distributed,
is_distributed,
is_primary,
get_rank,
barrier,
all_reduce_average,
all_gather_dict
)
dataloaders["train_sampler"].set_epoch(curr_epoch)
for batch_idx, batch_data_label in enumerate(dataloaders['train']):
curr_time = time.time()
curr_iter = curr_epoch * len(dataloaders['train']) + batch_idx
curr_lr = adjust_learning_rate(args, optimizer, curr_iter / max_iters)
for key in batch_data_label:
batch_data_label[key] = batch_data_label[key].to(net_device)
# Forward pass
optimizer.zero_grad()
outputs = model(batch_data_label, is_eval=False)
loss = outputs['loss']
loss = all_reduce_average(loss)
if not math.isfinite(loss.item()):
if curr_nan_times < max_tolerant_nan:
logout("Loss in not finite. Skip this training step.")
curr_nan_times += 1
continue
else:
logout("Loss in not finite. Terminate training.")
exit(-1)
curr_nan_times = 0
loss.backward()
if args.clip_gradient > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_gradient)
optimizer.step()
time_delta.update(time.time() - curr_time)
loss_avg.update(loss.item())
# logging
if is_primary() and curr_iter % args.log_every == 0:
mem_mb = torch.cuda.max_memory_allocated() / (1024 ** 2)
eta_seconds = (max_iters - curr_iter) * time_delta.avg
eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
logout(
f"Epoch [{curr_epoch}/{args.max_epoch}]; "
f"Iter [{curr_iter}/{max_iters}]; "
f"Loss {loss_avg.avg:0.2f}; "
f"LR {curr_lr:0.2e}; Iter time {time_delta.avg:0.2f}; "
f"ETA {eta_str}; Mem {mem_mb:0.2f}MB"
)
barrier()
# save ckpt
if is_primary() and (curr_iter + 1) % args.save_every == 0:
save_checkpoint(
args.checkpoint_dir,
model_no_ddp,
optimizer,
curr_epoch,
args,
best_val_metrics,
filename=f"checkpoint_{(curr_iter + 1) // 1000}k.pth",
)
# eval
if (curr_iter + 1) % args.eval_every_iteration == 0 \
and (curr_iter + 1) > args.start_eval_after:
eval_metrics = {}
model.eval()
for test_loader in dataloaders['test']:
task_metrics = test_loader.dataset.eval_func(
args,
curr_epoch,
model,
dataset_config,
test_loader,
logout,
curr_train_iter=curr_iter
)
eval_metrics.update(task_metrics)
model.train()
if not best_val_metrics or (
best_val_metrics[args.criterion] < eval_metrics[args.criterion]
):
best_val_metrics = eval_metrics
filename = "checkpoint_best.pth"
save_checkpoint(
args.checkpoint_dir,
model_no_ddp,
optimizer,
curr_epoch,
args,
best_val_metrics,
filename="checkpoint_best.pth",
)
if is_primary():
logout(
f"Epoch [{curr_epoch}/{args.max_epoch}] "
f"saved current best val checkpoint at {filename}; "
f"{args.criterion} {eval_metrics[args.criterion]}"
)
# end of an iteration
# end of an epoch
save_checkpoint(
args.checkpoint_dir,
model_no_ddp,
optimizer,
curr_epoch,
args,
best_val_metrics,
filename="checkpoint.pth",
)
# end of training
eval_metrics = {}
model.eval()
for test_loader in dataloaders['test']:
task_metrics = test_loader.dataset.eval_func(
args,
| curr_epoch, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: briannlongzhao/DreamDistribution
# Path: prompt_learner.py
class PromptLearner(nn.Module):
"""
PromptLearner class implements learnable prompt embeddings
Input class idx, output learnable prompt embedding of that class
The learnable context has shape (n_cls, n_prompts, n_ctx, dim)
"""
def __init__(
self,
pretrained_model_name_or_path,
classnames,
n_ctx=4,
n_prompts=32,
cls_pos="end",
dtype=torch.float32,
use_classname=True,
customize_prefix=None,
customize_suffix=None,
reparam_samples=4
):
super().__init__()
self.dtype = dtype
self.n_prompts = n_prompts
self.classnames = classnames
self.use_classname = use_classname
self.customize_prefix = customize_prefix
self.customize_suffix = customize_suffix
self.reparam_samples = reparam_samples
if customize_suffix is not None or customize_prefix is not None \
or self.classnames is None or self.use_classname is False: # Disable classname for customize generation
self.use_classname = False
self.classnames = None
self.n_cls = len(self.classnames) if self.classnames is not None else 1
self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="tokenizer")
self.vision_encoder = CLIPVisionModel.from_pretrained("openai/clip-vit-large-patch14")
self.vision_processor = AutoProcessor.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained(pretrained_model_name_or_path, subfolder="text_encoder")
self.text_encoder = CustomTextEncoder(text_encoder.text_model)
self.vision_encoder.requires_grad_(False)
self.text_encoder.requires_grad_(False)
ctx_dim = self.text_encoder.final_layer_norm.weight.shape[0]
# random initialization
print("Initializing class-specific contexts with prompt distribution learning")
ctx_vectors = torch.empty(self.n_cls, n_prompts, n_ctx, ctx_dim, dtype=self.dtype)
nn.init.normal_(ctx_vectors, std=0.02)
prompt_placeholder = " ".join(["X"] * n_ctx)
print(f"Number of context words (tokens): {n_ctx}")
print(f"Number of prompts per class: {n_prompts}")
self.ctx = nn.Parameter(ctx_vectors) # to be optimized
if self.use_classname:
self.classnames = [name.replace("_", " ") for name in self.classnames]
self.name_lens = [len(self.tokenizer(name).input_ids) - 2 for name in self.classnames]
prompts = [prompt_placeholder + " " + name + "." for name in self.classnames]
else:
self.customize_prefix = "" if self.customize_prefix is None else self.customize_prefix
self.customize_suffix = "" if self.customize_suffix is None else self.customize_suffix
prompts = [(self.customize_prefix + " " + prompt_placeholder + " " + self.customize_suffix).strip()]
self.embedder = self.text_encoder.embeddings
# tokenized_prompts as an anchor for retrieving position of eos token in each class prompt
tokenized_prompts = torch.cat([
self.tokenizer(
p,
max_length=self.tokenizer.model_max_length,
padding="max_length",
truncation=True,
return_tensors="pt"
).input_ids
for p in prompts
]).to(self.embedder.position_ids.device)
with torch.no_grad():
embedding = self.embedder(tokenized_prompts).type(self.dtype)
# These token vectors will be saved when in save_model(),
# but they should be ignored in load_model() as we want to use
# those computed using the current class names
self.register_buffer("token_prefix", embedding[:, :1, :]) # SOS
self.register_buffer("token_suffix", embedding[:, 1 + n_ctx:, :]) # CLS, EOS
self.n_ctx = n_ctx
self.ctx_dim = ctx_dim
self.tokenized_prompts = tokenized_prompts # torch.Tensor
self.n_tokens = self.tokenizer.model_max_length
self.class_token_position = cls_pos
self.means = None
self.stds = None
self.prompt_texts = None
self.prompt_freq = np.ones((self.n_cls, n_prompts))
"""
Interpret the learned context vector by finding the closest word in embedding space
If prompt distribution learning, interpret the mean
"""
def interpret(self, cls_idx=0):
ctx = self.ctx[cls_idx].mean(dim=0)
eow = "</w>"
_, words, _ = interpret_ctx(ctx, tokenizer=self.tokenizer, embedder=self.embedder, topk=1)
if self.use_classname:
if self.class_token_position == "end":
words = words + [[self.classnames[cls_idx]]]
elif self.class_token_position == "front":
words = [[self.classnames[cls_idx]]] + words
elif self.class_token_position == "middle":
                words = words[:len(words) // 2] + [[self.classnames[cls_idx]]] + words[len(words) // 2:]
else:
if self.customize_prefix:
words = [[self.customize_prefix+eow]] + words
if self.customize_suffix:
words = words + [[eow+self.customize_suffix+eow]]
words = ''.join([word[0] for word in words]).replace(eow, ' ')
words = words.strip()
return words
"""
Concat the input ctx with the tokens of class names represented by cls_idx
Input ctx with shape (B,n_ctx,d) or (B,p,n_ctx,d), cls_idx is list of length B
"""
def concat(self, ctx, cls_idx):
prefix = self.token_prefix[cls_idx]
suffix = self.token_suffix[cls_idx]
if ctx.ndim == 4: # (B,p,n_ctx,d)
p = ctx.shape[1]
prefix = repeat(prefix, "b l d -> b p l d", p=p)
suffix = repeat(suffix, "b l d -> b p l d", p=p)
if self.class_token_position == "end":
prompts = torch.cat(
[
prefix, # (*, 1, dim)
ctx, # (*, n_ctx, dim)
suffix, # (*, *, dim)
],
dim=-2,
)
elif self.class_token_position == "middle":
half_n_ctx = self.n_ctx // 2
prompts = []
for i in range(cls_idx.shape[0]):
name_len = self.name_lens[cls_idx[i]]
prefix_i = prefix[i:i+1, :, :] # keep dim
class_i = suffix[i:i+1, :name_len, :]
suffix_i = suffix[i:i+1, name_len:, :]
ctx_i_half1 = ctx[i:i+1, :half_n_ctx, :]
ctx_i_half2 = ctx[i:i+1, half_n_ctx:, :]
prompt = torch.cat(
[
prefix_i, # (*, 1, dim)
ctx_i_half1, # (*, n_ctx//2, dim)
class_i, # (*, name_len, dim)
ctx_i_half2, # (*, n_ctx//2, dim)
suffix_i, # (*, *, dim)
],
dim=-2,
)
prompts.append(prompt)
prompts = torch.cat(prompts, dim=0)
elif self.class_token_position == "front":
prompts = []
for i in range(cls_idx.shape[0]):
name_len = self.name_lens[cls_idx[i]]
prefix_i = prefix[i:i+1, :, :]
class_i = suffix[i:i+1, :name_len, :]
suffix_i = suffix[i:i+1, name_len:, :]
ctx_i = ctx[i:i+1, :, :]
prompt = torch.cat(
[
prefix_i, # (*, 1, dim)
class_i, # (*, name_len, dim)
ctx_i, # (*, n_ctx, dim)
suffix_i, # (*, *, dim)
],
dim=-2,
)
prompts.append(prompt)
prompts = torch.cat(prompts, dim=0)
else:
raise ValueError
return prompts
"""
Concat the given context vectors with input prefix and suffix pre text encoder
Input ctx: (B,n_prompts,n_ctx,d=1024) or (B,n_ctx,d=1024)
Output: (B,n_prompts,l=77,d=1024) or (B,l=77,d=1024)
"""
def concat_custom(self, ctx, prefix="", suffix=""):
if prefix is None or prefix == "":
prefix_tokens = torch.Tensor(
[[self.tokenizer.bos_token_id, self.tokenizer.eos_token_id]]
).to(self.embedder.position_ids.device).to(torch.int64)
else:
prefix_tokens = self.tokenizer(
prefix,
max_length=self.tokenizer.model_max_length,
padding="max_length",
truncation=True,
return_tensors="pt"
).input_ids.to(self.embedder.position_ids.device)
if suffix is None or suffix == "":
suffix_tokens = torch.Tensor(
[[self.tokenizer.bos_token_id, self.tokenizer.eos_token_id]+[self.tokenizer.pad_token_id]*(self.n_tokens-2)]
).to(self.embedder.position_ids.device).to(torch.int64)
else:
suffix_tokens = self.tokenizer(
suffix,
max_length=self.tokenizer.model_max_length,
padding="max_length",
truncation=True,
return_tensors="pt"
).input_ids.to(self.embedder.position_ids.device)
prefix_eos_position = (prefix_tokens == self.tokenizer.eos_token_id).nonzero()[-1, -1].item()
suffix_eos_position = (suffix_tokens == self.tokenizer.eos_token_id).nonzero()[-1, -1].item()
assert prefix_eos_position + self.n_ctx + suffix_eos_position <= self.n_tokens, "prefix+ctx+suffix too long"
prefix_embeddings = self.embedder(prefix_tokens).type(self.dtype)
suffix_embeddings = self.embedder(suffix_tokens).type(self.dtype)
prefix_embeddings = prefix_embeddings[..., :prefix_eos_position, :]
suffix_embeddings = suffix_embeddings[..., 1:(self.n_tokens-self.n_ctx-prefix_eos_position+1), :]
assert ctx.ndim == 4, "ctx should have shape (B,p,l,d)"
assert ctx.shape[1] == self.n_prompts, f"ctx shape {ctx.shape}[1] should match self.n_prompts={self.n_prompts}"
prefix_embeddings = repeat(prefix_embeddings, "1 l d -> b p l d", b=ctx.shape[0], p=ctx.shape[1])
suffix_embeddings = repeat(suffix_embeddings, "1 l d -> b p l d", b=ctx.shape[0], p=ctx.shape[1])
prompts = torch.cat([prefix_embeddings, ctx, suffix_embeddings], dim=-2)
return prompts
"""
Input pooled_prompt (B,n_prompts,d=1024)
Output orthogonal loss (scalar)
"""
def orthogonal_loss(self, pooled_prompts):
pooled_prompts = normalize(pooled_prompts, dim=-1)
cos_sim = pooled_prompts @ pooled_prompts.transpose(1, 2) # (B,n_prompts,n_prompts)
diag_mask = torch.eye(cos_sim.shape[1], device=cos_sim.device).bool()
cos_sim[:, diag_mask] = 0
loss_per_batch = (cos_sim ** 2).sum(dim=(1, 2)) / (cos_sim.shape[1] * (cos_sim.shape[1] - 1))
loss = loss_per_batch.mean()
return loss
"""
Input class indices (B)
Output tokenized class prompts (B,l=77,d=1024)
"""
def forward(self, cls_idx, imgs=None, interpret=False):
ctx = self.ctx[cls_idx]
if self.use_classname:
prompts = self.concat(ctx, cls_idx)
else:
prompts = self.concat_custom(ctx, self.customize_prefix, self.customize_suffix)
prompts_hidden_state, pooled_prompts = self.text_encoder(
prompts,
pooled=True,
tokenized_prompts=self.tokenized_prompts.to(cls_idx.device)[cls_idx]
)
assert pooled_prompts.ndim == 3, f"pooled_prompts should have shape (B,n_prompts,d), now is {pooled_prompts.shape}"
ortho_loss = self.orthogonal_loss(pooled_prompts)
prompts_hidden_state = self.reparameterize(prompts_hidden_state, n_samples=self.reparam_samples) # (B,n,l,d)
if interpret:
prompts = prompts.mean(dim=1)
_, caption, _ = interpret_ctx(torch.squeeze(prompts), tokenizer=self.tokenizer, embedder=self.embedder, topk=1)
return prompts_hidden_state, caption, ortho_loss
return prompts_hidden_state, ortho_loss
"""
For each class, fit the collection of prompts as a normal distribution
(after concat with classnames/prefix/suffix and post text encoder)
Store in self.means and self.stds
Currently not taking covariance matrix
"""
def fit(self, prefix=None, suffix=None):
self.prompt_texts = []
self.means = np.empty((self.n_cls, self.n_tokens, self.ctx_dim)) # (n_cls*77*1024)
self.stds = np.empty(self.means.shape) # (n_cls,77,1024)
for cls in tqdm(range(self.n_cls), desc="fit prompt learner distribution"):
cls_ctx = self.ctx[cls].unsqueeze(0) # (1,p,l,d) or (1,l,d)
if self.use_classname:
cls_prompts = self.concat(cls_ctx, [cls])
else:
cls_prompts = self.concat_custom(cls_ctx, prefix=prefix, suffix=suffix)
cls_prompts = self.text_encoder(cls_prompts)[0]
self.means[cls] = cls_prompts.mean(dim=0).detach().cpu().numpy()
self.stds[cls] = cls_prompts.std(dim=0).detach().cpu().numpy()
if self.n_prompts == 1:
self.stds[cls] = np.zeros(self.stds[cls].shape)
prompt_text = self.interpret(cls)
self.prompt_texts.append(prompt_text)
"""
After prompts are fit, sample from stored means and stds as input to diffusion
If not prompt distribution learning, return self.forward
"""
def sample(self, cls_idx, interpret=False, reparam_epsilon=None):
assert self.means is not None and self.stds is not None
if reparam_epsilon is not None:
prompt = torch.from_numpy(self.means[cls_idx] + reparam_epsilon * self.stds[cls_idx]).unsqueeze(dim=0)
else:
prompt = torch.from_numpy(normal(self.means[cls_idx], self.stds[cls_idx])).unsqueeze(dim=0)
if interpret:
caption = self.prompt_texts[cls_idx]
return prompt, caption
return prompt
def reparameterize(self, prompts, n_samples=1):
std = torch.std(prompts, dim=1) # (B,l,d)
mu = torch.mean(prompts, dim=1) # (B,l,d)
eps = torch.randn_like(repeat(std, "b l d -> b n l d", n=n_samples)) # (B,n,l,d)
prompt = mu.unsqueeze(1) + eps * std.unsqueeze(1)
return prompt # (B,n,l,d)
"""
For each class, fit the collection of prompts as a normal distribution (pre text encoder)
Store in self.ctx_means and self.ctx_stds
Experimental use only
"""
def fit_ctx(self, prefix=None, suffix=None):
self.ctx_means = np.empty((self.n_cls, self.n_tokens, self.ctx_dim)) # (n_cls,l=77,d=1024)
self.ctx_stds = np.empty(self.ctx_means.shape) # (n_cls,l=77,d=1024)
self.prompt_texts = []
for cls in tqdm(range(self.n_cls), desc="fit prompt learner distribution"):
cls_ctx = self.ctx[cls].unsqueeze(0) # (1,p,l,d)
if self.use_classname:
cls_prompts = self.concat(cls_ctx, [cls])
else:
cls_prompts = self.concat_custom(cls_ctx, prefix=prefix, suffix=suffix)
cls_prompts = cls_prompts[0]
self.ctx_means[cls] = cls_prompts.mean(dim=0).detach().cpu().numpy()
self.ctx_stds[cls] = cls_prompts.std(dim=0).detach().cpu().numpy()
prompt_text = self.interpret(cls)
self.prompt_texts.append(prompt_text)
"""
After prompts are fit, sample from stored ctx_means and ctx_stds and feed to text encoder as input to diffusion
If not prompt distribution learning, return self.forward
Experimental use only
"""
def sample_ctx(self, cls_idx, interpret=False):
assert self.ctx_means is not None and self.ctx_stds is not None
prompt = torch.from_numpy(normal(self.ctx_means[cls_idx], self.ctx_stds[cls_idx])).unsqueeze(dim=0)
prompt = self.text_encoder(prompt.to(dtype=self.dtype, device=self.embedder.position_ids.device))
if interpret:
caption = self.prompt_texts[cls_idx]
return prompt, caption
return prompt
# Path: utils/imagenet_classes.py
# Path: utils/fit_prompt_learner.py
import torch
import numpy as np
import argparse
import os
import sys
from prompt_learner import PromptLearner
from utils.imagenet_classes import wnid2classname_simple as classnames
# Fit distribution of learned prompt collections
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../SDCoOp"))
classids = list(classnames.keys())
classnames = list(classnames.values())
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--weights_path", type=str, help=".pt ckpt path")
parser.add_argument("--output_path", type=str, help=".npz save path")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default="stabilityai/stable-diffusion-2",
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--cls_pos",
type=str,
default="end",
choices=["end", "middle", "front"],
help="Position of class name token in prompt"
)
parser.add_argument(
"--use_classname",
default=True,
action="store_true",
help="Use class names in prompt learner"
)
args = parser.parse_args()
| return args |
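PromptLearner.reparameterize above treats the n_prompts learned prompts of a class as samples from a per-token Gaussian and draws fresh prompt embeddings from it. The following is a stand-alone sketch of that reparameterization trick under the same tensor layout (batch, prompts, tokens, dim); the shapes in the demo call are made up for illustration, and einops is assumed to be available, as in the class above.

import torch
from einops import repeat


def reparameterize(prompts: torch.Tensor, n_samples: int = 1) -> torch.Tensor:
    # prompts: (B, n_prompts, l, d); fit mean/std over the prompt axis and resample.
    std = prompts.std(dim=1)   # (B, l, d)
    mu = prompts.mean(dim=1)   # (B, l, d)
    eps = torch.randn_like(repeat(std, "b l d -> b n l d", n=n_samples))
    return mu.unsqueeze(1) + eps * std.unsqueeze(1)  # (B, n_samples, l, d)


# Demo with small made-up dimensions: 2 classes in the batch, 8 prompts, 16 tokens, 64 dims.
samples = reparameterize(torch.randn(2, 8, 16, 64), n_samples=4)
print(samples.shape)  # torch.Size([2, 4, 16, 64])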
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: amazon-science/RefChecker
# Path: refchecker/extractor/claude2_extractor.py
class Claude2Extractor(ExtractorBase):
def __init__(
self,
claim_format:str='triplet'
) -> None:
super().__init__(claim_format=claim_format)
if self.claim_format == 'triplet':
self.prompt_temp_wq = CLAUDE2_TRIPLET_EXTRACTION_PROMPT_Q
self.prompt_temp = CLAUDE2_TRIPLET_EXTRACTION_PROMPT
def extract_claim_triplets(self, response, question=None, max_new_tokens=500):
if question is None:
prompt = self.prompt_temp.format(
input_text=response
)
else:
prompt = self.prompt_temp_wq.format(
q=question,
a=response
)
claude2_response = get_claude2_response(
prompt=prompt,
temperature=0,
max_new_tokens=max_new_tokens
)
if claude2_response and len(claude2_response):
kg_str = None
if '###' in claude2_response:
kg_str = claude2_response[:claude2_response.index('###')]
else:
kg_str = claude2_response
triplets = self._parse_claim_triplets(kg_str)
return triplets
return []
# Path: refchecker/extractor/gpt4_extractor.py
class GPT4Extractor(ExtractorBase):
def __init__(
self,
claim_format:str='triplet'
) -> None:
super().__init__(claim_format=claim_format)
if self.claim_format == 'triplet':
self.prompt_temp_wq = GPT4_TRIPLET_EXTRACTION_PROMPT_Q
self.prompt_temp = GPT4_TRIPLET_EXTRACTION_PROMPT
def extract_claim_triplets(self, response, question=None, max_new_tokens=500):
if question is None:
prompt = self.prompt_temp.format(
input_text=response
)
else:
prompt = self.prompt_temp_wq.format(
q=question,
a=response
)
gpt4_response = get_openai_model_response(
prompt,
temperature=0,
model='gpt-4',
max_new_tokens=max_new_tokens
)
if gpt4_response and len(gpt4_response):
kg_str = None
if '###' in gpt4_response:
kg_str = gpt4_response[:gpt4_response.index('###')]
else:
kg_str = gpt4_response
triplets = self._parse_claim_triplets(kg_str)
return triplets
return []
# Path: refchecker/checker/claude2_checker.py
class Claude2Checker(CheckerBase):
def __init__(self) -> None:
super().__init__()
self.prompt_temp = CLAUDE2_CHECKING_PROMPT
self.prompt_temp_wq = CLAUDE2_CHECKING_PROMPT_Q
def _check(
self,
claims: List,
references: List,
response: str,
question: str,
):
ret_labels = []
for claim, reference in zip(claims, references):
if isinstance(claim, list):
assert len(claim) == 3
claim = f"({claim[0]}, {claim[1]}, {claim[2]})"
if question is None:
prompt = self.prompt_temp.format(
reference=reference,
claim=claim
)
else:
prompt = self.prompt_temp_wq.format(
question=question,
reference=reference,
claim=claim
)
claude2_response = get_claude2_response(
prompt=prompt,
temperature=0,
max_new_tokens=6
)
if claude2_response and len(claude2_response):
label = None
if self.label_contradiction.lower() in claude2_response.lower():
label = self.label_contradiction
elif self.label_entailment.lower() in claude2_response.lower():
label = self.label_entailment
else:
label = self.label_neutral
ret_labels.append(label)
else:
                raise RuntimeError('Claude 2 API returns None or empty string')
return ret_labels
# Path: refchecker/checker/gpt4_checker.py
class GPT4Checker(CheckerBase):
def __init__(self) -> None:
super().__init__()
self.prompt_temp = GPT4_CHECKING_PROMPT
self.prompt_temp_wq = GPT4_CHECKING_PROMPT_Q
def _check(
self,
claims: List,
references: List,
response: str,
question: str,
):
ret_labels = []
for claim, reference in zip(claims, references):
if isinstance(claim, list):
assert len(claim) == 3
claim = f"({claim[0]}, {claim[1]}, {claim[2]})"
if question is None:
prompt = self.prompt_temp.format(
reference=reference,
claim=claim
)
else:
prompt = self.prompt_temp_wq.format(
question=question,
reference=reference,
claim=claim
)
openai_response = get_openai_model_response(
prompt=prompt,
temperature=0,
model='gpt-4'
)
if openai_response and len(openai_response):
label = None
if self.label_contradiction.lower() in openai_response.lower():
label = self.label_contradiction
elif self.label_entailment.lower() in openai_response.lower():
label = self.label_entailment
else:
label = self.label_neutral
ret_labels.append(label)
else:
                raise RuntimeError('OpenAI API returns None or empty string')
return ret_labels
# Path: refchecker/checker/nli_checker.py
class NLIChecker(CheckerBase):
def __init__(
self,
model='ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli',
device=0
):
super().__init__()
self.model = AutoModelForSequenceClassification.from_pretrained(model).to(device)
self.model.eval()
self.tokenizer = AutoTokenizer.from_pretrained(model)
self.device = device
@torch.no_grad()
def _check(
self,
claims: List,
references: List,
response: str,
question: str,
):
N1, N2 = len(references), len(claims)
assert N1 == N2, f"Batches must be of the same length. {N1} != {N2}"
if isinstance(claims[0], list):
assert len(claims[0]) == 3
claims = [f"{c[0]} {c[1]} {c[2]}" for c in claims]
inputs = self.tokenizer(
references, claims, max_length=512, truncation=True,
return_tensors="pt", padding=True, return_token_type_ids=True
)
inputs = {k: v.to(self.device) for k, v in inputs.items()}
output = self.model(**inputs).logits.softmax(dim=-1).cpu() # [N, 3]
preds = output.argmax(dim=-1)
ret = [LABELS[p] for p in preds]
return ret
# Path: refchecker/retriever/google_retriever.py
class GoogleRetriever:
def __init__(self, cache_dir: str = "./.cache"):
self.bm25 = None
self._load_key()
cache_dir = os.path.join(cache_dir, "serper")
self.cache = diskcache.Cache(cache_dir)
def _load_key(self):
self.api_key = os.environ.get("SERPER_API_KEY", None)
assert self.api_key is not None, \
f"Require environment variable SERPER_API_KEY."
def _query_google(self, query: str) -> dict:
"""Search Google using Serper API and retrieve abundant information"""
if query in self.cache:
return self.cache[query]
else:
payload = json.dumps({"q": query})
headers = {
"X-API-KEY": self.api_key,
"Content-Type": "application/json"
}
response = requests.request(
"POST", SERPER_URL, headers=headers, data=payload
)
response_dict = json.loads(response.text)
self.cache[query] = response_dict
return response_dict
def _get_queries(self, paragraph: str) -> List[str]:
"""Use LLM to generate query to search on the Internet to get relevant
information. Currently only single query is generated."""
prompt = PROMPT_FOR_QUERY_GEN % paragraph
query = get_openai_model_response(prompt, temperature=0)
if query is None:
raise RuntimeError(
"Retriever: Empty response from LLM for query generation."
)
return [query.strip()]
@staticmethod
def _parse_results(results: dict) -> Tuple[List[dict], bool]:
"""Adapted from `FacTool` to utilize retrieved results as answers."""
snippets = []
with_answerbox = False
if results.get("answerBox"):
# This case indicates that Google has made a good answer to the question, and it's as desired to utilize this information.
answer_box: dict = results.get("answerBox", {})
if answer_box.get("answer"):
element = {
"content": answer_box.get("answer"),
"source": answer_box.get("link"),
}
snippets = [element]
elif answer_box.get("snippet"):
element = {
"content": answer_box.get("snippet").replace("\n", " "),
"source": answer_box.get("link"),
}
snippets = [element]
elif answer_box.get("snippetHighlighted"):
element = {
"content": answer_box.get("snippetHighlighted"),
"source": answer_box.get("link"),
}
snippets = [element]
if len(snippets) > 0:
with_answerbox = True
if results.get("knowledgeGraph"):
kg: dict = results.get("knowledgeGraph", {})
title = kg.get("title")
entity_type = kg.get("type")
if entity_type:
element = {
"content": f"{title}: {entity_type}",
"source": kg.get("link"),
}
snippets.append(element)
description = kg.get("description")
if description:
element = {"content": description, "source": kg.get("link")}
snippets.append(element)
for attribute, value in kg.get("attributes", {}).items():
element = {"content": f"{attribute}: {value}", "source": kg.get("link")}
snippets.append(element)
# TODO: set num of parsing link in parameters
for result in results["organic"][:3]:
if "snippet" in result:
element = {"content": result["snippet"], "source": result["link"]}
snippets.append(element)
for attribute, value in result.get("attributes", {}).items():
element = {"content": f"{attribute}: {value}", "source": result["link"]}
snippets.append(element)
if len(snippets) == 0:
warnings.warn("No usable google search results.")
return snippets, with_answerbox
@staticmethod
def _get_url_text(url) -> str:
# Read page and return text
buf = []
try:
soup = BeautifulSoup(
requests.get(url, timeout=10).text, "html.parser"
)
for p in soup.find_all("p"):
pt = p.get_text()
if len(buf) == 0 or pt not in buf[-1]:
buf.append(pt)
return "\n".join(buf)
except:
return ""
@staticmethod
def _split_doc(
text: str,
        max_words_per_paragraph=384,
short_paragraph_threshold=96,
preserve_threshold=8,
) -> List[str]:
"""Use spacy to split a document to paragraphs."""
paras = text.splitlines()
splitted = []
sent_to_be_concat = ""
accumulate_length = 0
for p in paras:
p = p.strip()
if len(p) < 1:
continue # empty lines
sents = sentencize(p)
for sent in sents:
                if accumulate_length + len(sent) <= max_words_per_paragraph:
sent_to_be_concat += sent.text_with_ws
accumulate_length += len(sent)
else:
splitted.append(sent_to_be_concat)
sent_to_be_concat = sent.text_with_ws
accumulate_length = len(sent)
if accumulate_length <= short_paragraph_threshold:
sent_to_be_concat += " "
else:
splitted.append(sent_to_be_concat)
sent_to_be_concat = ""
accumulate_length = 0
if accumulate_length >= preserve_threshold:
splitted.append(sent_to_be_concat)
return splitted
def _process_retrieved_docs(
self,
docs: List[dict],
query: str,
best_k=8,
max_words_per_paragraph=384,
skip_repeated_corpus=True,
) -> List[Dict[str, Union[str, None]]]: # {"content": <text>, "url": <url>}
if len(docs) == 0:
return None
if len(docs) == 1:
return docs
else:
links_dict = {}
corpus, links = [], [] # List of documents
# retrieve through the links
for relevance in docs:
url = relevance["source"]
if "youtube" in url:
continue # skip youtube due to slow fetching
if url in links_dict.keys():
if skip_repeated_corpus:
continue
online_text = links_dict[url]
else:
online_text = self._get_url_text(url)
links_dict[url] = online_text
splitted_text = self._split_doc(
online_text, max_words_per_paragraph
)
corpus.extend(splitted_text)
links.extend([url] * len(splitted_text))
meta_doc_dict = dict(zip(corpus, links))
tokenized_corpus = [doc.split(" ") for doc in corpus]
bm25 = rank_bm25.BM25Okapi(tokenized_corpus)
best_docs = bm25.get_top_n(query.split(), corpus, n=best_k)
return [
{"content": k, "source": meta_doc_dict[k]}
for k in best_docs
]
def retrieve(
self,
text: str,
top_k=3,
max_words_per_paragraph=384
) -> List[Dict[str, Union[str, None]]]:
"""
Search reference documents on the Internet based on LLM generated query.
Parameters
----------
text : str
Text to be checked.
top_k : int
Number of reference documents to be retrieved.
max_words_per_paragraph : int
Maximum number of words in each reference document.
Returns
-------
List[str]
List of reference documents
"""
# Step 1. Generate queries for searching using LLM.
queries = self._get_queries(text)
# Step 2. Search google with the queries.
relevant_info_dicts, best_docs_all = [], []
for q in queries:
searched_results = self._query_google(q)
parsed_results, with_answerbox = self._parse_results(
searched_results
)
if with_answerbox:
answerbox_answer, parsed_results = (
parsed_results[0],
parsed_results[1:],
)
relevant_info_dicts.extend(parsed_results)
best_docs = self._process_retrieved_docs(
relevant_info_dicts,
q,
best_k=math.ceil((top_k - with_answerbox) / len(queries)),
max_words_per_paragraph=max_words_per_paragraph,
skip_repeated_corpus=True,
)
if with_answerbox:
best_docs.insert(0, answerbox_answer)
best_docs_all.extend(best_docs)
refs = [
doc["content"] for doc in best_docs_all
]
return refs
# Path: refchecker/aggregator.py
def strict_agg(results):
"""Aggregate results by zero-tolerance on negative labels."""
if not results:
return "Abstain"
for result in results:
if result == "Contradiction":
return "Contradiction"
if result == "Neutral":
return "Neutral"
return "Entailment"
# Path: refchecker/aggregator.py
def soft_agg(results):
"""Aggregate results by taking the ratio of each category."""
if not results:
return {
"Entailment": 0.0,
"Neutral": 0.0,
"Contradiction": 0.0,
"Abstain": 1.0,
}
total = len(results)
agg = {
"Entailment": 0.0,
"Neutral": 0.0,
"Contradiction": 0.0,
"Abstain": 0.0,
}
for result in results:
agg[result] += 1.0
for key in agg:
agg[key] /= total
return agg
# Path: refchecker/aggregator.py
def major_agg(results):
"""Aggregate results by majority vote."""
if not results:
return "Abstain"
agg = Counter(results)
return agg.most_common(1)[0][0]
# Path: refchecker/cli.py
import os
import json
from argparse import ArgumentParser, RawTextHelpFormatter
from tqdm import tqdm
from .extractor import Claude2Extractor, GPT4Extractor
from .checker import Claude2Checker, GPT4Checker, NLIChecker
from .retriever import GoogleRetriever
from .aggregator import strict_agg, soft_agg, major_agg
help="Path to the Anthropic api key file. Required if the Anthropic "
"Claude2 api is used."
)
parser.add_argument(
"--aws_bedrock_region", type=str, default="",
help="AWS region where the Amazon Bedrock api is deployed. Required if "
"the Amazon Bedrock api is used."
)
parser.add_argument(
"--use_retrieval", action="store_true",
help="Whether to use retrieval to find the reference for checking. "
"Required if the reference\nfield in input data is not provided."
)
parser.add_argument(
"--serper_api_key", type=str, default="",
help="Path to the serper api key file. Required if the google retriever"
" is used."
)
return parser.parse_args()
def main():
args = get_args()
# set environment variables
if args.openai_key:
with open(args.openai_key, "r") as fp:
os.environ["OPENAI_API_KEY"] = fp.read().strip()
if args.anthropic_key:
with open(args.anthropic_key, "r") as fp:
os.environ["ANTHROPIC_API_KEY"] = fp.read().strip()
if args.aws_bedrock_region:
os.environ["aws_bedrock_region"] = args.aws_bedrock_region
if args.serper_api_key:
os.environ["SERPER_API_KEY"] = args.serper_api_key
if args.mode == "extract":
extract(args)
elif args.mode == "check":
check(args)
elif args.mode == "extract-check":
output_path = args.output_path
args.output_path = output_path + ".temp"
extract(args)
args.input_path = args.output_path
args.output_path = output_path
check(args)
else:
raise NotImplementedError
def extract(args):
# initialize models
if args.extractor_name == "claude2":
extractor = Claude2Extractor()
elif args.extractor_name == "gpt4":
extractor = GPT4Extractor()
else:
raise NotImplementedError
# load data
with open(args.input_path, "r") as fp:
input_data = json.load(fp)
# extract triplets
print('Extracting')
output_data = []
for item in tqdm(input_data):
assert "response" in item, "response field is required"
response = item["response"]
question = item.get("question", None)
triplets = extractor.extract_claim_triplets(response, question, max_new_tokens=args.extractor_max_new_tokens)
out_item = {**item, **{"triplets": triplets}}
output_data.append(out_item)
with open(args.output_path, "w") as fp:
json.dump(output_data, fp, indent=2)
def check(args):
# initialize models
if args.checker_name == "claude2":
checker = Claude2Checker()
elif args.checker_name == "gpt4":
checker = GPT4Checker()
elif args.checker_name == "nli":
checker = NLIChecker()
else:
raise NotImplementedError
retriever = None
if args.use_retrieval:
if args.retriever_name == "google":
retriever = GoogleRetriever(args.cache_dir)
else:
raise NotImplementedError
if args.aggregator_name == "strict":
agg_fn = strict_agg
elif args.aggregator_name == "soft":
agg_fn = soft_agg
elif args.aggregator_name == "major":
agg_fn = major_agg
else:
raise NotImplementedError
# load data
with open(args.input_path, "r") as fp:
input_data = json.load(fp)
# check triplets
print('Checking')
output_data = []
for item in tqdm(input_data):
assert "triplets" in item, "triplets field is required"
triplets = item["triplets"]
if args.use_retrieval:
reference = retriever.retrieve(item["response"])
item["reference"] = reference
else:
assert "reference" in item, \
| "reference field is required if retriever is not used." |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RenShuhuai-Andy/TimeChat
# Path: timechat/common/registry.py
class Registry:
def register_builder(cls, name):
def wrap(builder_cls):
def register_task(cls, name):
def wrap(task_cls):
def register_model(cls, name):
def wrap(model_cls):
def register_processor(cls, name):
def wrap(processor_cls):
def register_lr_scheduler(cls, name):
def wrap(lr_sched_cls):
def register_runner(cls, name):
def wrap(runner_cls):
def register_path(cls, name, path):
def register(cls, name, obj):
def get_builder_class(cls, name):
def get_model_class(cls, name):
def get_task_class(cls, name):
def get_processor_class(cls, name):
def get_lr_scheduler_class(cls, name):
def get_runner_class(cls, name):
def list_runners(cls):
def list_models(cls):
def list_tasks(cls):
def list_processors(cls):
def list_lr_schedulers(cls):
def list_datasets(cls):
def get_path(cls, name):
def get(cls, name, default=None, no_warning=False):
def unregister(cls, name):
# Path: timechat/datasets/builders/base_dataset_builder.py
class BaseDatasetBuilder:
train_dataset_cls, eval_dataset_cls = None, None
def __init__(self, cfg=None):
super().__init__()
if cfg is None:
# help to create datasets from default config.
self.config = load_dataset_config(self.default_config_path())
elif isinstance(cfg, str):
self.config = load_dataset_config(cfg)
else:
# when called from task.build_dataset()
self.config = cfg
self.data_type = self.config.data_type
self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
def build_datasets(self):
# download, split, etc...
# only called on 1 GPU/TPU in distributed
if is_main_process():
self._download_data()
if is_dist_avail_and_initialized():
dist.barrier()
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
datasets = self.build() # dataset['train'/'val'/'test']
return datasets
def build_processors(self):
vis_proc_cfg = self.config.get("vis_processor")
txt_proc_cfg = self.config.get("text_processor")
if vis_proc_cfg is not None:
vis_train_cfg = vis_proc_cfg.get("train")
vis_eval_cfg = vis_proc_cfg.get("eval")
self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg)
self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg)
if txt_proc_cfg is not None:
txt_train_cfg = txt_proc_cfg.get("train")
txt_eval_cfg = txt_proc_cfg.get("eval")
self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg)
self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg)
@staticmethod
def _build_proc_from_cfg(cfg):
return (
registry.get_processor_class(cfg.name).from_config(cfg)
if cfg is not None
else None
)
@classmethod
def default_config_path(cls, type="default"):
return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])
def _download_data(self):
self._download_ann()
self._download_vis()
def _download_ann(self):
"""
Download annotation files if necessary.
All the vision-language datasets should have annotations of unified format.
storage_path can be:
(1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.
(2) basename/dirname: will be suffixed with base name of URL if dirname is provided.
Local annotation paths should be relative.
"""
anns = self.config.build_info.annotations
splits = anns.keys()
cache_root = registry.get_path("cache_root")
for split in splits:
info = anns[split]
urls, storage_paths = info.get("url", None), info.storage
if isinstance(urls, str):
urls = [urls]
if isinstance(storage_paths, str):
storage_paths = [storage_paths]
assert len(urls) == len(storage_paths)
for url_or_filename, storage_path in zip(urls, storage_paths):
# if storage_path is relative, make it full by prefixing with cache_root.
if not os.path.isabs(storage_path):
storage_path = os.path.join(cache_root, storage_path)
dirname = os.path.dirname(storage_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.isfile(url_or_filename):
src, dst = url_or_filename, storage_path
if not os.path.exists(dst):
shutil.copyfile(src=src, dst=dst)
else:
logging.info("Using existing file {}.".format(dst))
else:
if os.path.isdir(storage_path):
# if only dirname is provided, suffix with basename of URL.
raise ValueError(
"Expecting storage_path to be a file path, got directory {}".format(
storage_path
)
)
else:
filename = os.path.basename(storage_path)
download_url(url=url_or_filename, root=dirname, filename=filename)
def _download_vis(self):
storage_path = self.config.build_info.get(self.data_type).storage
storage_path = utils.get_cache_path(storage_path)
if not os.path.exists(storage_path):
warnings.warn(
f"""
The specified path {storage_path} for visual inputs does not exist.
Please provide a correct path to the visual inputs or
refer to datasets/download_scripts/README.md for downloading instructions.
"""
)
def build(self):
"""
Create by split datasets inheriting torch.utils.data.Datasets.
# build() can be dataset-specific. Overwrite to customize.
"""
self.build_processors()
build_info = self.config.build_info
ann_info = build_info.annotations
vis_info = build_info.get(self.data_type)
datasets = dict()
for split in ann_info.keys():
if split not in ["train", "val", "test"]:
continue
is_train = split == "train"
# processors
vis_processor = (
self.vis_processors["train"]
if is_train
else self.vis_processors["eval"]
)
text_processor = (
self.text_processors["train"]
if is_train
else self.text_processors["eval"]
)
# annotation path
ann_paths = ann_info.get(split).storage
if isinstance(ann_paths, str):
ann_paths = [ann_paths]
abs_ann_paths = []
for ann_path in ann_paths:
if not os.path.isabs(ann_path):
ann_path = utils.get_cache_path(ann_path)
abs_ann_paths.append(ann_path)
ann_paths = abs_ann_paths
# visual data storage path
vis_path = os.path.join(vis_info.storage, split)
if not os.path.isabs(vis_path):
# vis_path = os.path.join(utils.get_cache_path(), vis_path)
vis_path = utils.get_cache_path(vis_path)
if not os.path.exists(vis_path):
warnings.warn("storage path {} does not exist.".format(vis_path))
# create datasets
dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls
datasets[split] = dataset_cls(
vis_processor=vis_processor,
text_processor=text_processor,
ann_paths=ann_paths,
vis_root=vis_path,
)
return datasets
# Path: timechat/datasets/datasets/laion_dataset.py
class LaionDataset(BaseDataset):
def __init__(self, vis_processor, text_processor, location):
super().__init__(vis_processor=vis_processor, text_processor=text_processor)
self.inner_dataset = wds.DataPipeline(
wds.ResampledShards(location),
wds.tarfile_to_samples(handler=wds.warn_and_continue),
wds.shuffle(1000, handler=wds.warn_and_continue),
wds.decode("pilrgb", handler=wds.warn_and_continue),
wds.to_tuple("jpg", "json", handler=wds.warn_and_continue),
wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),
wds.map(self.to_dict, handler=wds.warn_and_continue),
)
def to_dict(self, sample):
return {
"image": sample[0],
"text_input": self.text_processor(sample[1]["caption"]),
}
# Path: timechat/datasets/datasets/llava_instruct_dataset.py
class Instruct_Dataset(BaseDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_root,num_video_query_token=32,tokenizer_name = '/mnt/workspace/ckpt/vicuna-13b/',data_type = 'image', model_type='vicuna'):
"""
vis_root (string): Root directory of Llava images (e.g. webvid_eval/video/)
ann_root (string): Root directory of video (e.g. webvid_eval/annotations/)
split (string): val or test
"""
super().__init__(vis_processor=vis_processor, text_processor=text_processor)
data_path = pathlib.Path(ann_root)
with data_path.open(encoding='utf-8') as f:
self.annotation = json.load(f)
self.vis_root = vis_root
self.resize_size = 224
self.num_frm = 8
self.tokenizer = LlamaTokenizer.from_pretrained(tokenizer_name, use_fast=False)
self.tokenizer.pad_token = self.tokenizer.unk_token
self.tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.num_video_query_token = num_video_query_token
self.IMAGE_PATCH_TOKEN_ID = self.tokenizer.get_vocab()[DEFAULT_IMAGE_PATCH_TOKEN]
self.transform = AlproVideoTrainProcessor(
image_size=self.resize_size, n_frms = self.num_frm
).transform
self.data_type = data_type
self.model_type = model_type
def _get_image_path(self, sample):
rel_video_fp ='COCO_train2014_' + sample['image']
full_video_fp = os.path.join(self.vis_root, rel_video_fp)
return full_video_fp
def __getitem__(self, index):
num_retries = 10 # skip error videos
for _ in range(num_retries):
try:
sample = self.annotation[index]
image_path = self._get_image_path(sample)
conversation_list = sample['conversations']
image = Image.open(image_path).convert("RGB")
image = self.vis_processor(image)
# text = self.text_processor(text)
sources = preprocess_multimodal(copy.deepcopy(conversation_list), None, cur_token_len=self.num_video_query_token)
if self.model_type =='vicuna':
data_dict = preprocess(
sources,
self.tokenizer)
elif self.model_type =='llama_v2':
data_dict = preprocess_for_llama_v2(
sources,
self.tokenizer)
else:
print('not support')
                    raise NotImplementedError('not support')
data_dict = dict(input_ids=data_dict["input_ids"][0],
labels=data_dict["labels"][0])
# image exist in the data
data_dict['image'] = image
except:
print(f"Failed to load examples with image: {image_path}. "
f"Will randomly sample an example as a replacement.")
index = random.randint(0, len(self) - 1)
continue
break
else:
raise RuntimeError(f"Failed to fetch image after {num_retries} retries.")
# "image_id" is kept to stay compatible with the COCO evaluation format
return {
"image": image,
"text_input": data_dict["input_ids"],
"labels": data_dict["labels"],
"type":'image',
}
def __len__(self):
return len(self.annotation)
def collater(self, instances):
input_ids, labels = tuple([instance[key] for instance in instances]
for key in ("text_input", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
)
if 'image' in instances[0]:
images = [instance['image'] for instance in instances]
if all(x is not None and x.shape == images[0].shape for x in images):
batch['images'] = torch.stack(images)
else:
batch['images'] = images
batch['conv_type'] = 'multi'
return batch
# Path: timechat/datasets/datasets/video_instruct_dataset.py
class Video_Instruct_Dataset(BaseDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_root, num_video_query_token=32,
tokenizer_name='/mnt/workspace/ckpt/vicuna-13b/', data_type='video', model_type='vicuna', num_frm=8,
sample_type='rand', max_txt_len=512, stride=32):
"""
vis_root (string): Root directory of Llava images (e.g. webvid_eval/video/)
ann_root (string): Root directory of video (e.g. webvid_eval/annotations/)
split (string): val or test
"""
super().__init__(vis_processor=vis_processor, text_processor=text_processor)
data_path = pathlib.Path(ann_root)
with data_path.open(encoding='utf-8') as f:
self.annotation = json.load(f)
self.num_video_query_token = num_video_query_token
self.vis_root = vis_root
self.resize_size = 224
self.num_frm = num_frm
self.tokenizer = LlamaTokenizer.from_pretrained(tokenizer_name, use_fast=False)
self.tokenizer.pad_token = self.tokenizer.unk_token
self.tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.IMAGE_PATCH_TOKEN_ID = self.tokenizer.get_vocab()[DEFAULT_IMAGE_PATCH_TOKEN]
self.transform = AlproVideoTrainProcessor(
image_size=self.resize_size, n_frms=self.num_frm
).transform
self.data_type = data_type
self.model_type = model_type
self.sample_type = sample_type
self.max_txt_len = max_txt_len
self.stride = stride
def _get_video_path(self, sample):
rel_video_fp = sample['video']
full_video_fp = os.path.join(self.vis_root, rel_video_fp)
return full_video_fp
def __getitem__(self, index):
num_retries = 10 # skip error videos
for _ in range(num_retries):
try:
sample = self.annotation[index]
video_path = self._get_video_path(sample)
conversation_list = sample['QA']
video, msg = load_video(
video_path=video_path,
n_frms=self.num_frm,
height=self.resize_size,
width=self.resize_size,
sampling=self.sample_type, return_msg=True
)
video = self.transform(video)
if 'cn' in self.data_type:
msg = ""
                # Add the video <DEFAULT_IMAGE_PATCH_TOKEN> tokens and msg to conversation list 0
cur_n_frm = video.shape[1]
cur_token_len = self.num_video_query_token * math.ceil(
cur_n_frm / self.stride) if self.stride > 0 else self.num_video_query_token
sources = preprocess_multimodal(copy.deepcopy(conversation_list), None, cur_token_len=cur_token_len,
msg=msg)
new_sources = convert_source_vicuna_format(sources)
if index == 0:
print(new_sources)
if self.model_type == 'vicuna':
data_dict = preprocess(
new_sources,
self.tokenizer,
self.max_txt_len
)
elif self.model_type == 'llama_v2':
data_dict = preprocess_for_llama_v2(
new_sources,
self.tokenizer,
self.max_txt_len
)
else:
print('not support')
                    raise NotImplementedError('not support')
data_dict = dict(input_ids=data_dict["input_ids"][0],
labels=data_dict["labels"][0])
# image exist in the data
data_dict['image'] = video
# timestamp
all_timestamps = msg.split('at')[1].replace('seconds.', '').strip().split(
',') # extract timestamps from msg
all_timestamps = [f'This frame is sampled at {t.strip()} second.' for t in all_timestamps]
all_timestamps = self.tokenizer(
all_timestamps,
return_tensors="pt",
padding="longest",
max_length=32,
truncation=True,
)
data_dict['timestamps'] = all_timestamps
except:
print(f"Failed to load examples with video: {video_path}. "
f"Will randomly sample an example as a replacement.")
index = random.randint(0, len(self) - 1)
continue
break
else:
raise RuntimeError(f"Failed to fetch video after {num_retries} retries.")
# "image_id" is kept to stay compatible with the COCO evaluation format
return {
"image": video,
"text_input": data_dict["input_ids"],
"labels": data_dict["labels"],
"type": 'video',
"timestamps": data_dict['timestamps'],
}
def __len__(self):
return len(self.annotation)
def collater(self, instances):
input_ids, labels, timestamps = tuple([instance[key] for instance in instances]
for key in ("text_input", "labels", "timestamps"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
)
if 'image' in instances[0]:
images = [instance['image'] for instance in instances]
if all(x is not None and x.shape == images[0].shape for x in
images): # nb of frames of all videos is ${num_frm}
batch['images'] = torch.stack(images)
timestamps_input_ids, timestamps_attention_mask = [], []
for timestamp in timestamps:
n_frm = timestamp['input_ids'].shape[0]
for i in range(n_frm):
timestamps_input_ids.append(timestamp['input_ids'][i])
timestamps_attention_mask.append(timestamp['attention_mask'][i])
timestamps_input_ids = torch.nn.utils.rnn.pad_sequence(
timestamps_input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
timestamps_attention_mask = torch.nn.utils.rnn.pad_sequence(
timestamps_attention_mask,
batch_first=True,
padding_value=0)
batch['timestamps'] = {'input_ids': timestamps_input_ids, 'attention_mask': timestamps_attention_mask}
else: # nb of frames of some videos is less than ${num_frm}
print(f'image shape not match: {[x.shape for x in images]}')
batch['images'] = images
batch_timestamps = []
for timestamp in timestamps:
batch_timestamps.append(
{'input_ids': timestamp['input_ids'], 'attention_mask': timestamp['attention_mask']})
batch['timestamps'] = batch_timestamps
batch['conv_type'] = 'multi'
return batch
# Path: timechat/datasets/builders/instruct_builder.py
import os
import logging
import warnings
from timechat.common.registry import registry
from timechat.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from timechat.datasets.datasets.laion_dataset import LaionDataset
from timechat.datasets.datasets.llava_instruct_dataset import Instruct_Dataset
from timechat.datasets.datasets.video_instruct_dataset import Video_Instruct_Dataset
@registry.register_builder("image_instruct")
class Image_Instruct_Builder(BaseDatasetBuilder):
train_dataset_cls = Instruct_Dataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/instruct/defaults.yaml"}
def _download_ann(self):
pass
def _download_vis(self):
pass
def build(self):
self.build_processors()
datasets = dict()
split = "train"
build_info = self.config.build_info
dataset_cls = self.train_dataset_cls
if self.config.num_video_query_token:
num_video_query_token = self.config.num_video_query_token
else:
num_video_query_token = 32
if self.config.tokenizer_name:
tokenizer_name = self.config.tokenizer_name
else:
tokenizer_name = '/mnt/workspace/ckpt/vicuna-13b/'
model_type = self.config.model_type if self.config.model_type else 'vicuna'
datasets[split] = dataset_cls(
vis_processor=self.vis_processors[split],
text_processor=self.text_processors[split],
vis_root=build_info.videos_dir,
ann_root=build_info.anno_dir,
num_video_query_token=num_video_query_token,
tokenizer_name=tokenizer_name,
data_type=self.config.data_type,
model_type=model_type,
)
return datasets
@registry.register_builder("video_instruct")
class Video_Instruct_Builder(BaseDatasetBuilder):
train_dataset_cls = Video_Instruct_Dataset
DATASET_CONFIG_DICT = {"default": "configs/datasets/instruct/defaults.yaml"}
def _download_ann(self):
pass
def _download_vis(self):
pass
def build(self):
self.build_processors()
datasets = dict()
split = "train"
build_info = self.config.build_info
dataset_cls = self.train_dataset_cls
if self.config.num_video_query_token:
num_video_query_token = self.config.num_video_query_token
else:
num_video_query_token = 32
if self.config.tokenizer_name:
tokenizer_name = self.config.tokenizer_name
else:
tokenizer_name = '/mnt/workspace/ckpt/vicuna-13b/'
model_type = self.config.model_type if self.config.model_type else 'vicuna'
num_frm = self.config.num_frm if self.config.num_frm else 8
sample_type = self.config.sample_type if self.config.sample_type else 'uniform'
max_txt_len = self.config.max_txt_len if self.config.max_txt_len else 512
stride = self.config.stride if self.config.stride else 0
datasets[split] = dataset_cls(
vis_processor=self.vis_processors[split],
text_processor=self.text_processors[split],
vis_root=build_info.videos_dir,
ann_root=build_info.anno_dir,
| num_video_query_token=num_video_query_token, |
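Both builders above rely on TimeChat's registry: the decorator stores each builder class under a string key, and callers recover it by that key instead of importing the class directly. A minimal lookup sketch follows; it assumes the timechat package and its default dataset YAML (with the keys read in build(), such as num_video_query_token and tokenizer_name) are available, so treat it as illustrative rather than a guaranteed runnable path.

from timechat.common.registry import registry

# Recover the builder registered as "video_instruct" by the decorator above and build its splits.
builder_cls = registry.get_builder_class("video_instruct")
builder = builder_cls()              # no cfg given: falls back to DATASET_CONFIG_DICT["default"]
datasets = builder.build_datasets()  # downloads/validates annotations, then returns {"train": ...}
train_set = datasets["train"]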
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Meituan-AutoML/Lenna
# Path: model/llava/model/llava_arch.py
class LlavaMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm_projector(image_features)
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images
):
vision_tower = self.get_vision_tower() # CLIPVisionTower
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if (
past_key_values is not None
and vision_tower is not None
and images is not None
and input_ids.shape[1] == 1
):
attention_mask = torch.ones(
(attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1),
dtype=attention_mask.dtype,
device=attention_mask.device,
)
return input_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1) for x in image_features]
else:
image_features = self.encode_images(images)
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # False
# multimodal LLM, but the current sample is not multimodal
cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)
cur_input_embeds = (
cur_input_embeds
+ (
0.0 * self.get_model().mm_projector(vision_tower.dummy_feature)
).sum()
)
new_input_embeds.append(cur_input_embeds)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_image_idx += 1
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
cur_new_input_embeds = []
if labels is not None:
cur_labels = labels[batch_idx]
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
while image_token_indices.numel() > 0:
cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(
self.config, "mm_use_im_start_end", False
):
cur_new_input_embeds.append(
self.get_model()
.embed_tokens(cur_input_ids[: image_token_start - 1])
.detach()
)
cur_new_input_embeds.append(
self.get_model().embed_tokens(
cur_input_ids[image_token_start - 1 : image_token_start]
)
)
cur_new_input_embeds.append(cur_image_features)
cur_new_input_embeds.append(
self.get_model().embed_tokens(
cur_input_ids[image_token_start + 1 : image_token_start + 2]
)
)
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(
torch.full(
(cur_image_features.shape[0],),
IGNORE_INDEX,
device=labels.device,
dtype=labels.dtype,
)
)
cur_new_labels.append(
cur_labels[image_token_start : image_token_start + 1]
)
cur_labels = cur_labels[image_token_start + 2 :]
elif getattr(self.config, "mm_use_im_start_end", False):
cur_new_input_embeds.append(
self.get_model().embed_tokens(cur_input_ids[:image_token_start])
)
cur_new_input_embeds.append(cur_image_features)
cur_new_input_embeds.append(
self.get_model().embed_tokens(
cur_input_ids[image_token_start + 1 : image_token_start + 2]
)
)
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(
torch.full(
(cur_image_features.shape[0],),
IGNORE_INDEX,
device=labels.device,
dtype=labels.dtype,
)
)
cur_new_labels.append(
cur_labels[image_token_start + 1 : image_token_start + 2]
)
cur_labels = cur_labels[image_token_start + 2 :]
else:
cur_new_input_embeds.append(
self.get_model().embed_tokens(cur_input_ids[:image_token_start])
)
cur_new_input_embeds.append(cur_image_features)
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(
torch.full(
(cur_image_features.shape[0],),
IGNORE_INDEX,
device=labels.device,
dtype=labels.dtype,
)
)
cur_labels = cur_labels[image_token_start + 1 :]
cur_image_idx += 1
if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(
self.config, "mm_use_im_start_end", False
):
cur_input_ids = cur_input_ids[image_token_start + 2 :]
elif getattr(self.config, "mm_use_im_start_end", False):
cur_input_ids = cur_input_ids[image_token_start + 2 :]
else:
cur_input_ids = cur_input_ids[image_token_start + 1 :]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
if cur_input_ids.numel() > 0:
if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(
self.config, "mm_use_im_start_end", False
):
cur_new_input_embeds.append(
self.get_model().embed_tokens(cur_input_ids).detach()
)
elif getattr(self.config, "mm_use_im_start_end", False):
cur_new_input_embeds.append(
self.get_model().embed_tokens(cur_input_ids)
)
else:
cur_new_input_embeds.append(
self.get_model().embed_tokens(cur_input_ids)
)
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [
x.to(device=self.device) for x in cur_new_input_embeds
]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat(
(
cur_new_embed,
torch.zeros(
(max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]),
dtype=cur_new_embed.dtype,
device=cur_new_embed.device,
),
),
dim=0,
)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat(
(
cur_new_label,
torch.full(
(max_len - cur_new_label.shape[0],),
IGNORE_INDEX,
dtype=cur_new_label.dtype,
device=cur_new_label.device,
),
),
dim=0,
)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(
attention_mask, _new_labels, new_labels
):
new_attn_mask_pad_left = torch.full(
(cur_new_labels.shape[0] - labels.shape[1],),
True,
dtype=attention_mask.dtype,
device=attention_mask.device,
)
new_attn_mask_pad_right = torch.full(
(cur_new_labels_align.shape[0] - cur_new_labels.shape[0],),
False,
dtype=attention_mask.dtype,
device=attention_mask.device,
)
cur_new_attention_mask = torch.cat(
(
new_attn_mask_pad_left,
cur_attention_mask,
new_attn_mask_pad_right,
),
dim=0,
)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
if attention_mask is not None:
new_attn_mask_pad_left = torch.full(
(
attention_mask.shape[0],
new_input_embeds.shape[1] - input_ids.shape[1],
),
True,
dtype=attention_mask.dtype,
device=attention_mask.device,
)
attention_mask = torch.cat(
(new_attn_mask_pad_left, attention_mask), dim=1
)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, attention_mask, past_key_values, new_input_embeds, new_labels
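    # Editor's note (illustrative summary, not part of the original repository): because each
    # sequence grows by a different amount once image features are spliced in, the alignment
    # branch above right-pads embeddings with zeros, labels with IGNORE_INDEX, and the attention
    # mask with False up to the longest sequence in the batch, while the left-side True padding
    # covers the extra positions gained by expanding the single image token into image features.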
# def initialize_vision_tokenizer(self, model_args, tokenizer):
def initialize_vision_tokenizer(self, model_args, num_new_tokens):
# if model_args.mm_use_im_patch_token:
# tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
# self.resize_token_embeddings(len(tokenizer))
if model_args.mm_use_im_start_end:
# num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
# self.resize_token_embeddings(len(tokenizer))
# if num_new_tokens > 0:
# input_embeddings = self.get_input_embeddings().weight.data
# output_embeddings = self.get_output_embeddings().weight.data
# input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
# dim=0, keepdim=True)
# output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
# dim=0, keepdim=True)
# input_embeddings[-num_new_tokens:] = input_embeddings_avg
# output_embeddings[-num_new_tokens:] = output_embeddings_avg
if model_args.tune_mm_mlp_adapter:
for p in self.get_input_embeddings().parameters():
p.requires_grad = True
for p in self.get_output_embeddings().parameters():
p.requires_grad = False
if model_args.pretrain_mm_mlp_adapter:
mm_projector_weights = torch.load(
model_args.pretrain_mm_mlp_adapter, map_location="cpu"
)
                input_embeddings = self.get_input_embeddings().weight.data  # restored from the commented-out block above; referenced by the shape checks below
                embed_tokens_weight = mm_projector_weights["model.embed_tokens.weight"]
assert num_new_tokens == 2
if input_embeddings.shape == embed_tokens_weight.shape:
input_embeddings[-num_new_tokens:] = embed_tokens_weight[
-num_new_tokens:
]
elif embed_tokens_weight.shape[0] == num_new_tokens:
input_embeddings[-num_new_tokens:] = embed_tokens_weight
else:
raise ValueError(
f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}."
)
elif model_args.mm_use_im_patch_token:
if model_args.tune_mm_mlp_adapter:
for p in self.get_input_embeddings().parameters():
p.requires_grad = False
for p in self.get_output_embeddings().parameters():
p.requires_grad = False
# Path: model/llava/model/llava_arch.py
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)
def get_vision_tower(self):
vision_tower = getattr(self, "vision_tower", None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
vision_tower = model_args.vision_tower
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = vision_tower
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
self.config.use_mm_proj = True
self.config.mm_hidden_size = vision_tower.hidden_size
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
if not hasattr(self, "mm_projector"):
self.mm_projector = nn.Linear(
self.config.mm_hidden_size, self.config.hidden_size
)
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(
pretrain_mm_mlp_adapter, map_location="cpu"
)
def get_w(weights, keyword):
return {
k.split(keyword + ".")[1]: v
for k, v in weights.items()
if keyword in k
}
self.mm_projector.load_state_dict(
get_w(mm_projector_weights, "mm_projector")
)
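# Editor's note: a minimal illustrative sketch (not part of the original repository) of what the
# get_w helper above does when loading the pretrained projector; the dict below is made up.
#   weights = {"model.mm_projector.0.weight": w0, "model.embed_tokens.weight": e}
#   get_w(weights, "mm_projector") == {"0.weight": w0}   # keys without the keyword are dropped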
# Path: model/llava/model/language_model/llava_llama.py
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import (AutoConfig, AutoModelForCausalLM, LlamaConfig,
LlamaForCausalLM, LlamaModel)
from transformers.modeling_outputs import CausalLMOutputWithPast
from ..llava_arch import LlavaMetaForCausalLM, LlavaMetaModel
import torch
import torch.nn as nn
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaConfig(LlamaConfig):
model_type = "llava"
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
config_class = LlavaConfig
def __init__(self, config: LlamaConfig):
super(LlavaLlamaModel, self).__init__(config)
class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
| config_class = LlavaConfig |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xmu-xiaoma666/X-Dreamer
# Path: render/mesh.py
class Mesh:
def __init__(self, v_pos=None, t_pos_idx=None, v_nrm=None, t_nrm_idx=None, v_tex=None, t_tex_idx=None, v_tng=None, t_tng_idx=None, material=None, base=None):
def copy_none(self, other):
def clone(self):
def load_mesh(filename, mtl_override=None):
def aabb(mesh):
def compute_edges(attr_idx, return_inverse=False):
def compute_edge_to_face_mapping(attr_idx, return_inverse=False):
def unit_size(mesh):
def center_by_reference(base_mesh, ref_aabb, scale):
def auto_normals(imesh):
def compute_tangents(imesh):
# Path: render/render.py
def interpolate(attr, rast, attr_idx, rast_db=None):
def shade(
gb_pos,
gb_geometric_normal,
gb_normal,
gb_tangent,
gb_texc,
gb_texc_deriv,
view_pos,
lgt,
material,
bsdf,
if_normal,
normal_rotate,
mode,
if_flip_the_normal,
if_use_bump
):
def render_layer(
rast,
rast_deriv,
mesh,
view_pos,
lgt,
resolution,
spp,
msaa,
bsdf,
if_normal,
normal_rotate,
mode,
if_flip_the_normal,
if_use_bump
):
def render_mesh(
ctx,
mesh,
mtx_in,
view_pos,
lgt,
resolution,
spp = 1,
num_layers = 1,
msaa = False,
background = None,
bsdf = None,
if_normal = False,
normal_rotate = None,
mode = 'geometry_modeling',
if_flip_the_normal = False,
if_use_bump = False
):
def prepare_input_vector(x):
def composite_buffer(key, layers, background, antialias):
def render_uv(ctx, mesh, resolution, mlp_texture):
def uv_padding(image, hole_mask, padding = 2, uv_padding_block = 4):
def render_uv1(ctx, mesh, resolution, mlp_texture, uv_padding_block):
# Path: render/util.py
def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
def reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:
def length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:
def safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:
def to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:
def _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:
def rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:
def _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:
def srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:
def reinhard(f: torch.Tensor) -> torch.Tensor:
def mse_to_psnr(mse):
def psnr_to_mse(psnr):
def get_miplevels(texture: np.ndarray) -> float:
def tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:
def cube_to_dir(s, x, y):
def latlong_to_cubemap(latlong_map, res):
def cubemap_to_latlong(cubemap, res):
def scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:
def scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:
def avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:
def segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:
def fovx_to_fovy(fovx, aspect):
def focal_length_to_fovy(focal_length, sensor_height):
def perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):
def perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):
def translate(x, y, z, device=None):
def rotate_x(a, device=None):
def rotate_x_1(a, device=None):
def rotate_y(a, device=None):
def rotate_y_1(a, device=None):
def rotate_y_2(a, device=None):
def rotate_x_2(a, device=None):
def scale(s, device=None):
def lookAt(eye, at, up):
def random_rotation_translation(t, device=None):
def random_rotation(device=None):
def lines_focal(o, d):
def cosine_sample(N, size=None):
def bilinear_downsample(x : torch.tensor) -> torch.Tensor:
def bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:
def init_glfw():
def save_image(fn, x : np.ndarray):
def save_image_raw(fn, x : np.ndarray):
def load_image_raw(fn) -> np.ndarray:
def load_image(fn) -> np.ndarray:
def time_to_text(x):
def checkerboard(res, checker_size) -> np.ndarray:
def get_random_bg(h, w):
R, L = aspect*y, -aspect*y
T, B = y, -y
I = torch.eye(3, dtype=o.dtype, device=o.device)
S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)
C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)
N = N/torch.linalg.norm(N)
# Path: geometry/dmtet_network.py
class Decoder(torch.nn.Module):
def __init__(self, input_dims = 3, internal_dims = 128, output_dims = 4, hidden = 2, multires = 2, AABB=None, mesh_scale = 2.1):
super().__init__()
self.mesh_scale = mesh_scale
desired_resolution = 4096
base_grid_resolution = 16
num_levels = 16
per_level_scale = np.exp(np.log(desired_resolution / base_grid_resolution) / (num_levels-1))
self.AABB= AABB
enc_cfg = {
"otype": "HashGrid",
"n_levels": num_levels,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": base_grid_resolution,
"per_level_scale" : per_level_scale
}
gradient_scaling = 1.0 #128
self.encoder = tcnn.Encoding(3, enc_cfg)
mlp_cfg = {
"n_input_dims" : self.encoder.n_output_dims,
"n_output_dims" : 4,
"n_hidden_layers" : 2,
"n_neurons" : 32
}
self.net = _MLP(mlp_cfg, gradient_scaling)
def forward(self, p):
_texc = (p.view(-1, 3) - self.AABB[0][None, ...]) / (self.AABB[1][None, ...] - self.AABB[0][None, ...])
_texc = torch.clamp(_texc, min=0, max=1)
p_enc = self.encoder(_texc.contiguous())
out = self.net(p_enc)
return out
def pre_train_ellipsoid(self, it, scene_and_vertices):
if it% 100 ==0:
print (f"Initialize SDF; it: {it}")
loss_fn = torch.nn.MSELoss()
scene = scene_and_vertices[0]
points_surface = scene_and_vertices[1].astype(np.float32)
points_surface_disturbed = points_surface + np.random.normal(loc=0.0, scale=0.05, size=points_surface.shape).astype(np.float32)
point_rand = (np.random.rand(3000,3).astype(np.float32)-0.5)* self.mesh_scale
query_point = np.concatenate((points_surface, points_surface_disturbed, point_rand))
signed_distance = scene.compute_signed_distance(query_point)
ref_value = torch.from_numpy(signed_distance.numpy()).float().cuda()
query_point = torch.from_numpy(query_point).float().cuda()
output = self(query_point)
loss = loss_fn(output[...,0], ref_value)
return loss
# Path: render/regularizer.py
def image_grad(buf, std=0.01):
def avg_edge_length(v_pos, t_pos_idx):
def laplace_regularizer_const(v_pos, t_pos_idx):
def normal_consistency(v_pos, t_pos_idx):
# Path: geometry/dmtet_x_dreamer.py
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import os
from render import mesh
from render import render
from render import util
from geometry.dmtet_network import Decoder
from render import regularizer
from torch.cuda.amp import custom_bwd, custom_fwd
tet_idx = _idx(torch.div(face_gidx, 2, rounding_mode='trunc'), N)
tri_idx = face_gidx % 2
uv_idx = torch.stack((
tet_idx * 4, tet_idx * 4 + tri_idx + 1, tet_idx * 4 + tri_idx + 2
), dim = -1). view(-1, 3)
return uvs, uv_idx
###############################################################################
# Marching tets implementation
###############################################################################
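    # Editor's note (added summary, not part of the original repository): __call__ below keeps only
    # tetrahedra whose four SDF signs disagree, places a vertex on every sign-crossing edge by
    # linearly interpolating the SDF values, and looks up the per-tet triangle connectivity from
    # triangle_table / num_triangles_table to assemble the output faces.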
def __call__(self, pos_nx3, sdf_n, tet_fx4):
with torch.no_grad():
occ_n = sdf_n > 0
occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1,4)
occ_sum = torch.sum(occ_fx4, -1)
valid_tets = (occ_sum>0) & (occ_sum<4)
occ_sum = occ_sum[valid_tets]
all_edges = tet_fx4[valid_tets][:,self.base_tet_edges].reshape(-1,2)
all_edges = self.sort_edges(all_edges)
unique_edges, idx_map = torch.unique(all_edges,dim=0, return_inverse=True)
unique_edges = unique_edges.long()
mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1,2).sum(-1) == 1
mapping = torch.ones((unique_edges.shape[0]), dtype=torch.long, device="cuda") * -1
mapping[mask_edges] = torch.arange(mask_edges.sum(), dtype=torch.long,device="cuda")
idx_map = mapping[idx_map]
interp_v = unique_edges[mask_edges]
edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1,2,3)
edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1,2,1)
edges_to_interp_sdf[:,-1] *= -1
denominator = edges_to_interp_sdf.sum(1,keepdim = True)
edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1])/denominator
verts = (edges_to_interp * edges_to_interp_sdf).sum(1)
idx_map = idx_map.reshape(-1,6)
v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device="cuda"))
tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)
num_triangles = self.num_triangles_table[tetindex]
faces = torch.cat((
torch.gather(input=idx_map[num_triangles == 1], dim=1, index=self.triangle_table[tetindex[num_triangles == 1]][:, :3]).reshape(-1,3),
torch.gather(input=idx_map[num_triangles == 2], dim=1, index=self.triangle_table[tetindex[num_triangles == 2]][:, :6]).reshape(-1,3),
), dim=0)
return verts, faces
class CameraEncoder(nn.Module):
def __init__(self):
super(CameraEncoder, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(6, 512),
nn.ReLU(),
nn.Linear(512, 1024),
nn.ReLU()
)
def forward(self, camera_data):
encoded_vector = self.encoder(camera_data)
return encoded_vector
###############################################################################
# Geometry interface
###############################################################################
class DMTetGeometry(torch.nn.Module):
def __init__(self, grid_res, scale, FLAGS):
super(DMTetGeometry, self).__init__()
self.FLAGS = FLAGS
self.grid_res = grid_res
self.marching_tets = DMTet()
tets = np.load('data/tets/{}_tets.npz'.format(self.grid_res))
self.verts = torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * scale
print("tet grid min/max", torch.min(self.verts).item(), torch.max(self.verts).item())
self.decoder = Decoder(multires=0 , AABB= self.getAABB(), mesh_scale= scale)
self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda')
self.generate_edges()
self.pos_encoder = CameraEncoder().to(self.verts.device)
def generate_edges(self):
with torch.no_grad():
edges = torch.tensor([0,1,0,2,0,3,1,2,1,3,2,3], dtype = torch.long, device = "cuda")
all_edges = self.indices[:,edges].reshape(-1,2)
all_edges_sorted = torch.sort(all_edges, dim=1)[0]
self.all_edges = torch.unique(all_edges_sorted, dim=0)
@torch.no_grad()
def getAABB(self):
return torch.min(self.verts, dim=0).values, torch.max(self.verts, dim=0).values
def getMesh(self, material):
pred= self.decoder(self.verts)
self.sdf , self.deform = pred[:, 0], pred[:, 1:]
v_deformed = self.verts + 1 / (self.grid_res ) * torch.tanh(self.deform)
verts, faces = self.marching_tets(v_deformed, self.sdf, self.indices)
imesh = mesh.Mesh(verts, faces, material=material)
imesh = mesh.auto_normals(imesh)
return imesh
def render(self, glctx, target, lgt, opt_material, bsdf=None, if_normal=False, mode = 'geometry_modeling', if_flip_the_normal = False, if_use_bump = False):
opt_mesh = self.getMesh(opt_material)
return render.render_mesh(glctx,
opt_mesh,
target['mvp'],
target['campos'],
lgt,
target['resolution'],
spp=target['spp'],
msaa= True,
background= target['background'],
bsdf= bsdf,
if_normal= if_normal,
normal_rotate= target['normal_rotate'],
mode = mode,
| if_flip_the_normal = if_flip_the_normal, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zhenzhiwang/intercontrol
# Path: data_loaders/amass/sampling/base.py
class FrameSampler:
sampling: str = "conseq"
sampling_step: int = 1
request_frames: Optional[int] = None
    threshold_reject: float = 0.75
# args = frame_sampler_parser()
max_len: int = 250
min_len: int = 45
# max_len: int = 360
# min_len: int = 15
def __call__(self, num_frames):
from .frames import get_frameix_from_data_index
return get_frameix_from_data_index(num_frames,
self.max_len,
self.request_frames,
self.sampling,
self.sampling_step)
def accept(self, duration):
# Outputs have original lengths
# Check if it is too long
if self.request_frames is None:
if duration > self.max_len:
return False
if duration < self.min_len:
return False
else:
# Reject sample if the length is
# too little relative to
# the request frames
# min_number = self.threshold_reject * self.request_frames
if duration < self.min_len: # min_number:
return False
return True
def get(self, key, default=None):
return getattr(self, key, default)
def __getitem__(self, key):
return getattr(self, key)
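# Editor's note: illustrative sketch (not part of the original repository). With the defaults
# above (min_len=45, max_len=250) and no requested frame count, accept() keeps only clips whose
# duration falls inside that window:
#   FrameSampler().accept(30)    # -> False (shorter than min_len)
#   FrameSampler().accept(120)   # -> True
#   FrameSampler().accept(300)   # -> False (longer than max_len)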
# Path: data_loaders/amass/transforms/smpl.py
class SMPLTransform(Transform):
def __init__(self, batch_size=16, rots2rfeats: Rots2Rfeats = None,
rots2joints: Rots2Joints = None,
joints2jfeats: Joints2Jfeats = None,
**kwargs):
if rots2rfeats == None:
rots2rfeats = Globalvelandy(path='./data_loaders/amass/transforms/rots2rfeats/globalvelandy/rot6d/babel-amass/separate_pairs',
normalization=True,
pose_rep='rot6d',
canonicalize=True,
offset=True,
name='Globalvelandy')
if rots2joints == None:
rots2joints = SMPLH(path='./body_models/smpl_models/smplh',
jointstype='smplnh',
input_pose_rep='matrix',
batch_size=batch_size,
gender='male',
name='SMPLH')
if joints2jfeats == None:
            joints2jfeats = None  # FIXME: probably not in use
self.rots2rfeats = rots2rfeats
self.rots2joints = rots2joints
self.joints2jfeats = joints2jfeats
def Datastruct(self, **kwargs):
return SMPLDatastruct(_rots2rfeats=self.rots2rfeats,
_rots2joints=self.rots2joints,
_joints2jfeats=self.joints2jfeats,
transforms=self,
**kwargs)
def __repr__(self):
return "SMPLTransform()"
# Path: data_loaders/get_data.py
def get_dataset_loader(name, batch_size, num_frames, split='train', load_mode='train', opt=None, short_db=False, cropping_sampler=False, size=None):
if load_mode == 'text_only':
load_mode = 'train'
dataset = get_dataset(name, num_frames, split, load_mode, batch_size, opt, short_db, cropping_sampler, size)
collate = get_collate_fn(name, load_mode)
n_workers = 1 if load_mode in ['movement_train', 'evaluator_train'] else 8
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=True,
num_workers=n_workers, drop_last=True, collate_fn=collate
)
return loader
# Path: data_loaders/humanml/options/train_options.py
class TrainTexMotMatchOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.parser.add_argument('--name', type=str, default="test", help='Name of this trial')
self.parser.add_argument("--gpu_id", type=int, default=-1,
help='GPU id')
self.parser.add_argument("--train_platform_type", default='NoPlatform',
choices=['NoPlatform', 'ClearmlPlatform', 'TensorboardPlatform'], type=str,
help="Choose platform to log results. NoPlatform means no logging.")
self.parser.add_argument("--num_frames", default=200, type=int,
help="Limit for the maximal number of frames. In HumanML3D and KIT this field is ignored.")
self.parser.add_argument('--dataset_name', type=str, default='t2m', help='Dataset Name')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
self.parser.add_argument('--decomp_name', type=str, default="Decomp_SP001_SM001_H512", help='Name of this trial')
self.parser.add_argument('--batch_size', type=int, default=32, help='Batch size')
self.parser.add_argument("--unit_length", type=int, default=4, help="Length of motion")
self.parser.add_argument("--max_text_len", type=int, default=20, help="Length of motion")
self.parser.add_argument('--dim_movement_enc_hidden', type=int, default=512,
help='Dimension of hidden unit in GRU')
self.parser.add_argument('--dim_movement_latent', type=int, default=512, help='Dimension of hidden unit in GRU')
self.parser.add_argument('--dim_text_hidden', type=int, default=512, help='Dimension of hidden unit in GRU')
self.parser.add_argument('--dim_motion_hidden', type=int, default=1024, help='Dimension of hidden unit in GRU')
self.parser.add_argument('--dim_coemb_hidden', type=int, default=512, help='Dimension of hidden unit in GRU')
self.parser.add_argument('--max_epoch', type=int, default=300, help='Training iterations')
self.parser.add_argument('--estimator_mod', type=str, default='bigru')
self.parser.add_argument('--feat_bias', type=float, default=5, help='Layers of GRU')
self.parser.add_argument('--negative_margin', type=float, default=10.0)
self.parser.add_argument('--lr', type=float, default=1e-4, help='Layers of GRU')
self.parser.add_argument('--is_continue', action="store_true", help='Training iterations')
self.parser.add_argument('--movement_from_scratch', action="store_true", help='Training iterations')
self.parser.add_argument('--log_every', type=int, default=50, help='Frequency of printing training progress')
self.parser.add_argument('--save_every_e', type=int, default=5, help='Frequency of printing training progress')
self.parser.add_argument('--eval_every_e', type=int, default=5, help='Frequency of printing training progress')
self.parser.add_argument('--save_latest', type=int, default=500, help='Frequency of printing training progress')
def parse(self):
self.opt = self.parser.parse_args()
self.opt.is_train = True
args = vars(self.opt)
return self.opt
# Path: data_loaders/humanml/networks/trainers.py
class TextMotionMatchTrainer(object):
def __init__(self, args, text_encoder, motion_encoder, movement_encoder):
self.opt = args
self.text_encoder = text_encoder
self.motion_encoder = motion_encoder
self.movement_encoder = movement_encoder
self.device = args.device
if args.is_train:
# self.motion_dis
self.logger = Logger2(args)
self.contrastive_loss = ContrastiveLoss(self.opt.negative_margin)
def resume(self, model_dir):
checkpoints = torch.load(model_dir, map_location=self.device)
self.text_encoder.load_state_dict(checkpoints['text_encoder'])
self.motion_encoder.load_state_dict(checkpoints['motion_encoder'])
self.movement_encoder.load_state_dict(checkpoints['movement_encoder'])
self.opt_text_encoder.load_state_dict(checkpoints['opt_text_encoder'])
self.opt_motion_encoder.load_state_dict(checkpoints['opt_motion_encoder'])
return checkpoints['epoch'], checkpoints['iter']
def save(self, model_dir, epoch, niter):
state = {
'text_encoder': self.text_encoder.state_dict(),
'motion_encoder': self.motion_encoder.state_dict(),
'movement_encoder': self.movement_encoder.state_dict(),
'opt_text_encoder': self.opt_text_encoder.state_dict(),
'opt_motion_encoder': self.opt_motion_encoder.state_dict(),
'epoch': epoch,
'iter': niter,
}
torch.save(state, model_dir)
@staticmethod
def zero_grad(opt_list):
for opt in opt_list:
opt.zero_grad()
@staticmethod
def clip_norm(network_list):
for network in network_list:
clip_grad_norm_(network.parameters(), 0.5)
@staticmethod
def step(opt_list):
for opt in opt_list:
opt.step()
def to(self, device):
self.text_encoder.to(device)
self.motion_encoder.to(device)
self.movement_encoder.to(device)
def train_mode(self):
self.text_encoder.train()
self.motion_encoder.train()
self.movement_encoder.eval()
def forward(self, batch_data):
word_emb, pos_ohot, caption, cap_lens, motions, m_lens, _ = batch_data
word_emb = word_emb.detach().to(self.device).float()
pos_ohot = pos_ohot.detach().to(self.device).float()
motions = motions.detach().to(self.device).float()
        # Sort the lengths of motions in descending order (lengths of text have already been sorted)
self.align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()
# print(self.align_idx)
# print(m_lens[self.align_idx])
motions = motions[self.align_idx]
m_lens = m_lens[self.align_idx]
'''Movement Encoding'''
if self.opt.foot_contact_entries > 0:
_motions = motions[..., :-self.opt.foot_contact_entries]
else:
_motions = motions
movements = self.movement_encoder(_motions).detach()
m_lens = m_lens // self.opt.unit_length
self.motion_embedding = self.motion_encoder(movements, m_lens)
'''Text Encoding'''
# time0 = time.time()
# text_input = torch.cat([word_emb, pos_ohot], dim=-1)
self.text_embedding = self.text_encoder(word_emb, pos_ohot, cap_lens)
self.text_embedding = self.text_embedding.clone()[self.align_idx]
def backward(self):
batch_size = self.text_embedding.shape[0]
'''Positive pairs'''
pos_labels = torch.zeros(batch_size).to(self.text_embedding.device)
self.loss_pos = self.contrastive_loss(self.text_embedding, self.motion_embedding, pos_labels)
'''Negative Pairs, shifting index'''
neg_labels = torch.ones(batch_size).to(self.text_embedding.device)
shift = np.random.randint(0, batch_size-1)
new_idx = np.arange(shift, batch_size + shift) % batch_size
self.mis_motion_embedding = self.motion_embedding.clone()[new_idx]
self.loss_neg = self.contrastive_loss(self.text_embedding, self.mis_motion_embedding, neg_labels)
self.loss = self.loss_pos + self.loss_neg
loss_logs = OrderedDict({})
loss_logs['loss'] = self.loss.item()
loss_logs['loss_pos'] = self.loss_pos.item()
loss_logs['loss_neg'] = self.loss_neg.item()
return loss_logs
def update(self):
self.zero_grad([self.opt_motion_encoder, self.opt_text_encoder])
loss_logs = self.backward()
self.loss.backward()
self.clip_norm([self.text_encoder, self.motion_encoder])
self.step([self.opt_text_encoder, self.opt_motion_encoder])
return loss_logs
def train(self, train_dataloader, val_dataloader):
self.to(self.device)
self.opt_motion_encoder = optim.Adam(self.motion_encoder.parameters(), lr=self.opt.lr)
self.opt_text_encoder = optim.Adam(self.text_encoder.parameters(), lr=self.opt.lr)
epoch = 0
it = 0
if self.opt.is_continue:
model_dir = pjoin(self.opt.model_dir, 'latest.tar')
epoch, it = self.resume(model_dir)
start_time = time.time()
total_iters = self.opt.max_epoch * len(train_dataloader)
print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_dataloader), len(val_dataloader)))
val_loss = 1000.
logs = OrderedDict()
min_val_loss = np.inf
while epoch < self.opt.max_epoch:
# time0 = time.time()
for i, batch_data in enumerate(train_dataloader):
self.train_mode()
self.forward(batch_data)
# time3 = time.time()
log_dict = self.update()
for k, v in log_dict.items():
if k not in logs:
logs[k] = v
else:
logs[k] += v
it += 1
if it % self.opt.log_every == 0:
mean_loss = OrderedDict({'val_loss': val_loss})
self.logger.scalar_summary('val_loss', val_loss, it)
for tag, value in logs.items():
self.logger.scalar_summary(tag, value / self.opt.log_every, it)
mean_loss[tag] = value / self.opt.log_every
logs = OrderedDict()
print_current_loss_decomp(start_time, it, total_iters, mean_loss, epoch, i)
if it % self.opt.save_latest == 0:
self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)
self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)
epoch += 1
if epoch % self.opt.save_every_e == 0:
self.save(pjoin(self.opt.model_dir, 'E%04d.tar' % (epoch)), epoch, it)
print('Validation time:')
loss_pos_pair = 0
loss_neg_pair = 0
val_loss = 0
with torch.no_grad():
for i, batch_data in enumerate(val_dataloader):
self.forward(batch_data)
self.backward()
loss_pos_pair += self.loss_pos.item()
loss_neg_pair += self.loss_neg.item()
val_loss += self.loss.item()
loss_pos_pair /= len(val_dataloader) + 1
loss_neg_pair /= len(val_dataloader) + 1
val_loss /= len(val_dataloader) + 1
print('Validation Loss: %.5f Positive Loss: %.5f Negative Loss: %.5f' %
(val_loss, loss_pos_pair, loss_neg_pair))
if val_loss < min_val_loss:
self.save(pjoin(self.opt.model_dir, 'finest.tar'), epoch, it)
min_val_loss = val_loss
if epoch % self.opt.eval_every_e == 0:
pos_dist = F.pairwise_distance(self.text_embedding, self.motion_embedding)
neg_dist = F.pairwise_distance(self.text_embedding, self.mis_motion_embedding)
pos_str = ' '.join(['%.3f' % (pos_dist[i]) for i in range(pos_dist.shape[0])])
neg_str = ' '.join(['%.3f' % (neg_dist[i]) for i in range(neg_dist.shape[0])])
save_path = pjoin(self.opt.eval_dir, 'E%03d.txt' % (epoch))
with cs.open(save_path, 'w') as f:
f.write('Positive Pairs Distance\n')
f.write(pos_str + '\n')
f.write('Negative Pairs Distance\n')
f.write(neg_str + '\n')
# Path: data_loaders/humanml/data/dataset.py
class Text2MotionDatasetV2(data.Dataset):
def __init__(self, opt, mean, std, split_file, w_vectorizer, num_frames, size=None, **kwargs):
self.opt = opt
self.w_vectorizer = w_vectorizer
self.max_length = 20
self.pointer = 0
self.num_frames = num_frames if num_frames else False
self.max_motion_length = opt.max_motion_length
if (self.num_frames == False) or type(self.num_frames)==int:
min_motion_len = 40 if self.opt.dataset_name =='t2m' else 24
else:
min_motion_len = self.num_frames[0]
self.max_motion_length = self.num_frames[1]
data_dict = {}
id_list = []
with cs.open(split_file, 'r') as f:
for line in f.readlines():
id_list.append(line.strip())
id_list = id_list[:size]
new_name_list = []
length_list = []
for name in tqdm(id_list):
try:
motion = np.load(pjoin(opt.motion_dir, name + '.npy'))
if (len(motion)) < min_motion_len or (len(motion) >= 200):
continue
text_data = []
flag = False
with cs.open(pjoin(opt.text_dir, name + '.txt')) as f:
for line in f.readlines():
text_dict = {}
line_split = line.strip().split('#')
caption = line_split[0]
tokens = line_split[1].split(' ')
f_tag = float(line_split[2])
to_tag = float(line_split[3])
f_tag = 0.0 if np.isnan(f_tag) else f_tag
to_tag = 0.0 if np.isnan(to_tag) else to_tag
text_dict['caption'] = caption
text_dict['tokens'] = tokens
if f_tag == 0.0 and to_tag == 0.0:
flag = True
text_data.append(text_dict)
else:
try:
n_motion = motion[int(f_tag*20) : int(to_tag*20)]
if (len(n_motion)) < min_motion_len or (len(n_motion) >= 200):
continue
new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
while new_name in data_dict:
new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
if self.num_frames != False:
if len(n_motion) >= self.max_motion_length:
bias = random.randint(0, len(n_motion) - self.max_motion_length)
data_dict[new_name] = {'motion': n_motion[bias: bias+self.max_motion_length],
'length': self.max_motion_length,
'text': [text_dict]}
length_list.append(self.max_motion_length)
else:
data_dict[new_name] = {'motion': n_motion,
'length': len(n_motion),
'text': [text_dict]}
length_list.append(len(n_motion))
else:
data_dict[new_name] = {'motion': n_motion,
'length': len(n_motion),
'text':[text_dict]}
length_list.append(len(n_motion))
new_name_list.append(new_name)
except:
print(line_split)
print(line_split[2], line_split[3], f_tag, to_tag, name)
# break
if flag:
if self.num_frames != False:
if len(motion) >= self.max_motion_length:
bias = random.randint(0, len(motion) - self.max_motion_length)
data_dict[name] = {'motion': motion[bias: bias + self.max_motion_length],
'length': self.max_motion_length,
'text': [text_dict]}
length_list.append(self.max_motion_length)
else:
data_dict[name] = {'motion': motion,
'length': len(motion),
'text': text_data}
length_list.append(len(motion))
else:
data_dict[name] = {'motion': motion,
'length': len(motion),
'text': text_data}
length_list.append(len(motion))
new_name_list.append(name)
except Exception as e:
print(e)
pass
name_list, length_list = zip(*sorted(zip(new_name_list, length_list), key=lambda x: x[1]))
self.mean = mean
self.std = std
self.length_arr = np.array(length_list)
self.data_dict = data_dict
self.name_list = name_list
self.reset_max_len(self.max_length)
def reset_max_len(self, length):
assert length <= self.max_motion_length
self.pointer = np.searchsorted(self.length_arr, length)
print("Pointer Pointing at %d"%self.pointer)
self.max_length = length
def inv_transform(self, data):
return data * self.std + self.mean
def inv_transform_torch(self, data):
return data * torch.tensor(self.std, dtype=data.dtype, device=data.device) + torch.tensor(self.mean, dtype=data.dtype, device=data.device)
def __len__(self):
return len(self.data_dict) - self.pointer
def __getitem__(self, item):
idx = self.pointer + item
data = self.data_dict[self.name_list[idx]]
motion, m_length, text_list = data['motion'], data['length'], data['text']
# Randomly select a caption
text_data = random.choice(text_list)
caption, tokens = text_data['caption'], text_data['tokens']
if len(tokens) < self.opt.max_text_len:
# pad with "unk"
tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
sent_len = len(tokens)
tokens = tokens + ['unk/OTHER'] * (self.opt.max_text_len + 2 - sent_len)
else:
# crop
tokens = tokens[:self.opt.max_text_len]
tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
sent_len = len(tokens)
pos_one_hots = []
word_embeddings = []
for token in tokens:
word_emb, pos_oh = self.w_vectorizer[token]
pos_one_hots.append(pos_oh[None, :])
word_embeddings.append(word_emb[None, :])
pos_one_hots = np.concatenate(pos_one_hots, axis=0)
word_embeddings = np.concatenate(word_embeddings, axis=0)
        # Crop the motions into multiples of 4, and introduce small variations
if self.opt.unit_length < 10:
coin2 = np.random.choice(['single', 'single', 'double'])
else:
coin2 = 'single'
if coin2 == 'double':
m_length = (m_length // self.opt.unit_length - 1) * self.opt.unit_length
elif coin2 == 'single':
m_length = (m_length // self.opt.unit_length) * self.opt.unit_length
idx = random.randint(0, len(motion) - m_length)
motion = motion[idx:idx+m_length]
"Z Normalization"
motion = (motion - self.mean) / self.std
if m_length < self.max_motion_length:
motion = np.concatenate([motion,
np.zeros((self.max_motion_length - m_length, motion.shape[1]))
], axis=0)
return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens), []
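# Editor's note (added summary, not part of the original repository): each __getitem__ call above
# returns the word embeddings and POS one-hots for the padded token sequence, the caption and its
# sentence length, the z-normalized and zero-padded motion with its true length, the joined token
# string, and an empty list placeholder.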
# Path: data_loaders/humanml/data/dataset.py
def collate_fn(batch):
batch.sort(key=lambda x: x[3], reverse=True)
return default_collate(batch)
# Path: data_loaders/humanml/data/dataset.py
class BABEL_Text2MotionDatasetV2(BABEL):
def __init__(self, split, datapath, transforms, opt, mean, std, w_vectorizer, sampler, mode, **kwargs):
BABEL.__init__(self, datapath=datapath, transforms=transforms, split=split, sampler=sampler,
parse_tokens=True, mode=mode, short_db=kwargs.get('short_db', False),
cropping_sampler=kwargs.get('cropping_sampler', False)) # tokens are needed for training
self.opt = opt
self.w_vectorizer = w_vectorizer
self.max_length = 20
self.pointer = 0
self.max_motion_length = opt.max_motion_length
self.mean = mean
self.std = std
def inv_transform(self, data):
return data * self.std + self.mean
def __getitem__(self, item):
keyid = self._split_index[item]
batch = self.load_keyid(keyid, mode='train')
# Randomly choose a motion from batch
caption = batch['text']
tokens = batch['tokens']
motion = batch['features']
m_length = batch['length']
tansition_seq = batch['is_transition']
if len(tokens) < self.opt.max_text_len:
# pad with "unk"
tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
sent_len = len(tokens)
tokens = tokens + ['unk/OTHER'] * (self.opt.max_text_len + 2 - sent_len)
else:
# crop
tokens = tokens[:self.opt.max_text_len]
tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
sent_len = len(tokens)
pos_one_hots = []
word_embeddings = []
for token in tokens:
word_emb, pos_oh = self.w_vectorizer[token]
pos_one_hots.append(pos_oh[None, :])
word_embeddings.append(word_emb[None, :])
pos_one_hots = np.concatenate(pos_one_hots, axis=0)
word_embeddings = np.concatenate(word_embeddings, axis=0)
        # Crop the motions into multiples of 4, and introduce small variations
if self.opt.unit_length < 10:
coin2 = np.random.choice(['single', 'single', 'double'])
else:
coin2 = 'single'
if coin2 == 'double':
m_length = (m_length // self.opt.unit_length - 1) * self.opt.unit_length
elif coin2 == 'single':
m_length = (m_length // self.opt.unit_length) * self.opt.unit_length
idx = random.randint(0, abs(len(motion) - m_length))
motion = motion[idx:idx+m_length]
tansition_seq = tansition_seq[idx:idx+m_length]
"Z Normalization"
motion = (motion - self.mean) / self.std
if m_length <= self.max_motion_length:
motion = np.concatenate([motion,
np.zeros((self.max_motion_length - m_length, motion.shape[1]))
], axis=0)
tansition_seq = np.concatenate([tansition_seq,
np.zeros(self.max_motion_length - m_length)
])
# print(word_embeddings.shape, motion.shape)
# print(tokens)
return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens), tansition_seq
# Path: data_loaders/humanml/utils/word_vectorizer.py
class WordVectorizer(object):
def __init__(self, meta_root, prefix):
def _get_pos_ohot(self, pos):
def __len__(self):
def __getitem__(self, item):
# Path: data_loaders/humanml/collect_babel_stats.py
def run():
# Path: data_loaders/humanml/train_tex_mot_match.py
import os
import torch
from os.path import join as pjoin
from data_loaders.amass.sampling import FrameSampler
from data_loaders.amass.transforms import SMPLTransform
from data_loaders.get_data import get_dataset_loader
from data_loaders.humanml.options.train_options import TrainTexMotMatchOptions
from data_loaders.humanml.networks.modules import *
from data_loaders.humanml.networks.trainers import TextMotionMatchTrainer
from data_loaders.humanml.data.dataset import Text2MotionDatasetV2, collate_fn, BABEL_Text2MotionDatasetV2
from data_loaders.humanml.scripts.motion_process import *
from torch.utils.data import DataLoader
from data_loaders.humanml.utils.word_vectorizer import WordVectorizer, POS_enumerator
from copy import deepcopy
from data_loaders.humanml import collect_babel_stats
def build_models(opt):
movement_enc = MovementConvEncoder(opt.dim_pose - opt.foot_contact_entries, opt.dim_movement_enc_hidden, opt.dim_movement_latent)
text_enc = TextEncoderBiGRUCo(word_size=dim_word,
pos_size=dim_pos_ohot,
hidden_size=opt.dim_text_hidden,
output_size=opt.dim_coemb_hidden,
| device=opt.device) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AGI-Collective/Robin
# Path: robin/constants.py
IMAGE_TOKEN_INDEX = -200
# Path: robin/constants.py
DEFAULT_IMAGE_TOKEN = "<image>"
# Path: robin/constants.py
DEFAULT_IM_START_TOKEN = "<im_start>"
# Path: robin/constants.py
DEFAULT_IM_END_TOKEN = "<im_end>"
# Path: robin/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
TWO = auto()
MPT = auto()
PLAIN = auto()
LLAMA_2 = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_images(self, return_pil=False):
def expand2square(pil_img, background_color=(122, 116, 104)):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: robin/mm_utils.py
def process_images(images, image_processor, model_cfg):
image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
new_images = []
#Hardcoded because reasons.
image_mean = (0.48145466, 0.4578275, 0.40821073)
if image_aspect_ratio == 'pad':
for image in images:
# TODO: Simon: don't hardcode image mean, also this is duplicated code with train.py
image_mean = getattr(image_processor, "image_mean", (0.48145466, 0.4578275, 0.40821073))
image = expand2square(image, tuple(int(x*255) for x in image_mean))
# TODO: Simon this is nasty, we need a more unified interface here
if hasattr(image_processor, "preprocess"):
image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
else:
image = image_processor(image).unsqueeze(0)
new_images.append(image)
else:
return image_processor(images, return_tensors='pt')['pixel_values']
if all(x.shape == new_images[0].shape for x in new_images):
new_images = torch.stack(new_images, dim=0)
return new_images
# Path: robin/mm_utils.py
def process_images_easy(images, image_processor, image_aspect_ratio):
new_images = []
image_mean = (0.48145466, 0.4578275, 0.40821073)
if image_aspect_ratio == 'pad':
for image in images:
image_mean = getattr(image_processor, "image_mean", (0.48145466, 0.4578275, 0.40821073))
image = expand2square(image, tuple(int(x*255) for x in image_mean))
if hasattr(image_processor, "preprocess"):
image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
else:
image = image_processor(image).unsqueeze(0)
new_images.append(image)
else:
return image_processor(images, return_tensors='pt')['pixel_values']
if all(x.shape == new_images[0].shape for x in new_images):
new_images = torch.stack(new_images, dim=0)
return new_images
# Path: robin/mm_utils.py
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
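# Editor's note: a hedged, illustrative sketch (not part of the original repository) of how
# tokenizer_image_token splices IMAGE_TOKEN_INDEX into the token ids; the toy tokenizer is an
# assumption used only to keep the example self-contained.
#   class ToyTokenizer:
#       bos_token_id = 1
#       def __call__(self, text):
#           out = type("Out", (), {})()
#           out.input_ids = [1] + [100 + i for i, _ in enumerate(text.split())]
#           return out
#   tokenizer_image_token("describe this <image> please", ToyTokenizer(), -200)
#   # -> [1, 100, 101, -200, 100]  (BOS, prompt ids, image placeholder, remaining ids)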
# Path: robin/mm_utils.py
def get_model_name_from_path(model_path):
model_path = model_path.strip("/")
model_paths = model_path.split("/")
if model_paths[-1].startswith('checkpoint-'):
return model_paths[-2] + "_" + model_paths[-1]
else:
return model_paths[-1]
# Path: robin/mm_utils.py
class KeywordsStoppingCriteria(StoppingCriteria):
def __init__(self, keywords, tokenizer, input_ids):
self.keywords = keywords
self.keyword_ids = []
self.max_keyword_len = 0
for keyword in keywords:
cur_keyword_ids = tokenizer(keyword).input_ids
if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
cur_keyword_ids = cur_keyword_ids[1:]
if len(cur_keyword_ids) > self.max_keyword_len:
self.max_keyword_len = len(cur_keyword_ids)
self.keyword_ids.append(torch.tensor(cur_keyword_ids))
self.tokenizer = tokenizer
self.start_len = input_ids.shape[1]
def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
assert output_ids.shape[0] == 1, "Only support batch size 1 (yet)" # TODO
offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)
self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
for keyword_id in self.keyword_ids:
if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():
return True
outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
for keyword in self.keywords:
if keyword in outputs:
return True
return False
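# Editor's note (added summary, not part of the original repository): the criterion above stops
# generation once the newly generated suffix matches any keyword, first by comparing raw token
# ids and then by substring-matching the decoded text as a fallback.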
# Path: robin/model/builder.py
def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda"):
kwargs = {"device_map": device_map}
if load_8bit:
kwargs['load_in_8bit'] = True
elif load_4bit:
kwargs['load_in_4bit'] = True
kwargs['quantization_config'] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type='nf4'
)
else:
kwargs['torch_dtype'] = torch.float16
# Load LLaVA model
if 'lora' in model_name.lower() and model_base is None:
warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')
lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_base)
print('Loading LLaVA from base model...')
if 'mistral' in model_name:
model = LlavaMistralForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
else:
model = LlavaLlamaForCausalLM.from_pretrained(
model_base,
low_cpu_mem_usage=True,
config=lora_cfg_pretrained,
**kwargs,
# use_flash_attention_2 = True,
)
token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
if model.lm_head.weight.shape[0] != token_num:
model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
print('Loading additional LLaVA weights...')
if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
print("Found non_lora_trainables")
non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
else:
# this is probably from HF Hub
from huggingface_hub import hf_hub_download
def load_from_hf(repo_id, filename, subfolder=None):
cache_file = hf_hub_download(
repo_id=repo_id,
filename=filename,
subfolder=subfolder)
return torch.load(cache_file, map_location='cpu')
non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
if any(k.startswith('model.model.') for k in non_lora_trainables):
non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
model.load_state_dict(non_lora_trainables, strict=False)
from peft import PeftModel
print('Loading LoRA weights...')
model = PeftModel.from_pretrained(model, model_path)
print('Merging LoRA weights...')
model = model.merge_and_unload()
print('Model is loaded...')
image_processor = None
mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
if mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
if mm_use_im_start_end:
tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
model.resize_token_embeddings(len(tokenizer))
#This actually adds the vision tower to the model.
vision_tower = model.get_vision_tower()
print("Loaded this vision tower")
if not vision_tower.is_loaded:
vision_tower.load_model()
vision_tower.to(device=device, dtype=torch.float16)
image_processor = vision_tower.image_processor
finetuned_ve = False if "frozen" in model_name.lower() else True
if finetuned_ve:
if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
print("Found lora_trainables")
original_weights = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'))
else:
# this is probably from HF Hub
from huggingface_hub import hf_hub_download
def load_from_hf(repo_id, filename, subfolder=None):
cache_file = hf_hub_download(
repo_id=repo_id,
filename=filename,
subfolder=subfolder)
return torch.load(cache_file)
original_weights = load_from_hf(model_path, 'non_lora_trainables.bin')
#Convert names
new_weights = {}
for key in original_weights.keys():
new_key = str(key).replace("base_model.model.model.vision_tower.","")
new_weights[new_key] = original_weights[key]
del original_weights
#This is so that we can load strict
projection = {}
projections = ["base_model.model.model.mm_projector.0.weight", "base_model.model.model.mm_projector.0.bias", "base_model.model.model.mm_projector.2.weight", "base_model.model.model.mm_projector.2.bias"]
for key in projections:
projection[key] = new_weights.pop(key)
result = vision_tower.load_state_dict(new_weights, strict = True)
print("Loading strict resuts:", result)
if hasattr(model.config, "max_sequence_length"):
context_len = model.config.max_sequence_length
else:
context_len = 2048
return tokenizer, model, image_processor, context_len
# Path: robin/utils.py
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
import torch
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
# Path: robin/serve/pipeline.py
import argparse
import requests
import torch
import sys, os
from io import BytesIO
from PIL import Image
from PIL import Image
from transformers import TextStreamer
from robin.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from robin.conversation import conv_templates, SeparatorStyle
from robin.mm_utils import process_images, process_images_easy, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from robin.model.builder import load_pretrained_model
from robin.utils import disable_torch_init
def load_image(image_file):
if image_file.startswith('http://') or image_file.startswith('https://'):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(image_file).convert('RGB')
return image
class LlavaMistralPipeline:
def __init__(self, model_path, model_base, device="cuda", load_8bit=False, load_4bit=False, temperature=.2, max_new_tokens=512):
self.model_path = model_path
self.model_base = model_base
self.device = device
self.load_8bit = load_8bit
self.load_4bit = load_4bit
self.device = device
self.temperature = temperature
self.max_new_tokens = max_new_tokens
# TODO: Simon: make this work reliably
if load_4bit or load_8bit:
print("WARNING: 4bit or 8bit models might not work as expected")
# Sure why not
disable_torch_init()
model_name = get_model_name_from_path(self.model_path)
self.tokenizer, self.model, self.image_processor, context_len = load_pretrained_model(self.model_path, self.model_base, model_name, self.load_8bit, self.load_4bit, device=self.device)
def _load_image_tensor(self, image_file):
image = load_image(image_file)
# Similar operation in model_worker.py
image_tensor = process_images_easy([image], self.image_processor, "pad")
if type(image_tensor) is list:
image_tensor = [image.to(self.model.device, dtype=torch.float16) for image in image_tensor]
else:
image_tensor = image_tensor.to(self.model.device, dtype=torch.float16)
return image_tensor
def __call__(self, messages):
conv = conv_templates['vicuna_v1'].copy()
assert conv.roles == ('USER', 'ASSISTANT')
# First message
assert messages[0]["role"] == "USER"
inp = messages[0]["content"]
if self.model.config.mm_use_im_start_end:
inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + inp
else:
inp = DEFAULT_IMAGE_TOKEN + '\n' + inp
# TODO: Simon: handle no image case
assert "image" in messages[0], 'First message needs to have an image url'
image_tensor = self._load_image_tensor(messages[0]["image"])
conv.append_message("USER", inp)
# Remaining messages
        # We typically assume the remaining messages follow the format of user, then assistant.
for message in messages[1:]:
assert message["role"] in ["USER", "ASSISTANT"], f"Only USER and ASSISTANT roles are supported, got {message['role']}"
assert "image" not in message, "Images can only be in the first user message"
conv.append_message(message["role"], message["content"])
# At the very end, we expect to see a user, so we add the empty assistant.
conv.append_message("ASSISTANT", None)
prompt = conv.get_prompt()
input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
# For vicuna_v1, stop_str == "</s>"
stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
keywords = [stop_str]
stopping_criteria = KeywordsStoppingCriteria(keywords, self.tokenizer, input_ids)
with torch.inference_mode():
output_ids = self.model.generate(
input_ids,
images=image_tensor,
do_sample=True,
temperature=self.temperature,
| max_new_tokens=self.max_new_tokens, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: huangb23/VTimeLLM
# Path: vtimellm/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
TWO = auto()
MPT = auto()
PLAIN = auto()
LLAMA_2 = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_images(self, return_pil=False):
def expand2square(pil_img, background_color=(122, 116, 104)):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: vtimellm/train/vtimellm_trainer.py
class VTimeLLMTrainer(Trainer):
def _save_checkpoint(self, model, trial, metrics=None):
if getattr(self.args, 'tune_mm_mlp_adapter', False):
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
run_dir = self._get_output_dir(trial=trial)
output_dir = os.path.join(run_dir, checkpoint_folder)
# Only save Adapter
keys_to_match = ['mm_projector']
if getattr(self.args, "use_im_start_end", False):
keys_to_match.extend(['embed_tokens', 'embed_in'])
weight_to_save = get_mm_adapter_state_maybe_zero_3(self.model.named_parameters(), keys_to_match)
if self.args.local_rank == 0 or self.args.local_rank == -1:
self.model.config.save_pretrained(output_dir)
torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))
else:
super(VTimeLLMTrainer, self)._save_checkpoint(model, trial, metrics)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
if getattr(self.args, 'tune_mm_mlp_adapter', False):
pass
else:
super(VTimeLLMTrainer, self)._save(output_dir, state_dict)
# Path: vtimellm/train/dataset.py
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_args: DataArguments) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
data_path=data_args.data_path,
data_args=data_args)
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset,
eval_dataset=None,
data_collator=data_collator)
# Path: vtimellm/train/dataset.py
class DataArguments:
data_path: str = field(default=None,
metadata={"help": "Path to the training data."})
lazy_preprocess: bool = False
feat_folder: Optional[str] = field(default=None)
# Path: vtimellm/model/vtimellm_llama.py
class VTimeLLMLlamaForCausalLM(LlamaForCausalLM, VTimeLLMMetaForCausalLM):
config_class = VTimeLLMConfig
def __init__(self, config):
super(LlamaForCausalLM, self).__init__(config)
self.model = VTimeLLMLlamaModel(config)
self.pretraining_tp = config.pretraining_tp
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_model(self):
return self.model
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
images: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
if inputs_embeds is None:
(
input_ids,
position_ids,
attention_mask,
past_key_values,
inputs_embeds,
labels
) = self.prepare_inputs_labels_for_multimodal(
input_ids,
position_ids,
attention_mask,
past_key_values,
labels,
images
)
return super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
images = kwargs.pop("images", None)
_inputs = super().prepare_inputs_for_generation(
input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
)
if images is not None:
_inputs['images'] = images
return _inputs
# Path: vtimellm/model/vtimellm_chatglm.py
class VTimeLLMChatGLMForCausalLM(ChatGLMForConditionalGeneration, VTimeLLMMetaForCausalLM):
config_class = VTimeLLMChatGLMConfig
def __init__(self, config, empty_init=True, device=None):
super(ChatGLMForConditionalGeneration, self).__init__(config)
self.transformer = VTimeLLMChatGLMModel(config, empty_init=empty_init, device=device)
self.max_sequence_length = config.max_length
self.config = config
self.quantized = False
# Initialize weights and apply final processing
self.post_init()
def get_model(self):
return self.transformer
def forward(
self,
input_ids: torch.LongTensor = None,
position_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
return_last_logit: Optional[bool] = False,
images: Optional[torch.FloatTensor] = None,
):
if inputs_embeds is None:
(
input_ids,
position_ids,
attention_mask,
past_key_values,
inputs_embeds,
labels
) = self.prepare_inputs_labels_for_multimodal(
input_ids,
position_ids,
attention_mask,
past_key_values,
labels,
images
)
return super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
images = kwargs.pop("images", None)
_inputs = super().prepare_inputs_for_generation(
input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
)
if images is not None:
_inputs['images'] = images
return _inputs
# Path: vtimellm/model/builder.py
def load_lora(model, lora_path):
non_lora_trainables_path = os.path.join(lora_path, 'non_lora_trainables.bin')
if os.path.exists(non_lora_trainables_path):
non_lora_trainables = torch.load(non_lora_trainables_path, map_location='cpu')
non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
if any(k.startswith('model.model.') for k in non_lora_trainables):
non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
model.load_state_dict(non_lora_trainables, strict=False)
print('Loading LoRA weights...')
model = PeftModel.from_pretrained(model, lora_path)
return model
# Path: vtimellm/mm_utils.py
def print_trainable_parameters(model):
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
# print(_, param.requires_grad, param.numel())
if param.requires_grad:
trainable_params += param.numel()
print(
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}"
)
# Path: vtimellm/train/train.py
import os
import logging
import pathlib
import torch
import transformers
import sys
from dataclasses import dataclass, field
from typing import Dict, Optional, Sequence, List
from vtimellm import conversation as conversation_lib
from vtimellm.train.vtimellm_trainer import VTimeLLMTrainer
from vtimellm.train.dataset import make_supervised_data_module, DataArguments
from vtimellm.model import VTimeLLMLlamaForCausalLM, VTimeLLMChatGLMForCausalLM
from vtimellm.model.builder import load_lora
from vtimellm.mm_utils import print_trainable_parameters
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from transformers import BitsAndBytesConfig
from peft import prepare_model_for_kbit_training
from peft import LoraConfig, get_peft_model
from peft.tuners.lora import LoraLayer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
sys.path.append(root_dir)
local_rank = None
def rank0_print(*args):
if local_rank == 0:
print(*args)
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="lmsys/vicuna-7b-v1.5")
stage2_path: Optional[str] = field(default=None)
version: Optional[str] = field(default="v0")
tune_mm_mlp_adapter: bool = field(default=False)
pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
training_stage: int = field(default=2)
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
remove_unused_columns: bool = field(default=False)
freeze_mm_mlp_adapter: bool = field(default=False)
model_max_length: int = field(
default=512,
metadata={
"help":
"Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
double_quant: bool = field(
default=True,
metadata={"help": "Compress the quantization statistics through double quantization."}
)
quant_type: str = field(
default="nf4",
metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."}
)
bits: int = field(
default=16,
metadata={"help": "How many bits to use."}
)
lora_enable: bool = False
lora_r: int = 64
lora_alpha: int = 16
lora_dropout: float = 0.05
lora_weight_path: str = ""
lora_bias: str = "none"
def maybe_zero_3(param, ignore_status=False, name=None):
if hasattr(param, "ds_id"):
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
if not ignore_status:
logging.warning(f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}")
with zero.GatheredParameters([param]):
param = param.data.detach().cpu().clone()
else:
param = param.detach().cpu().clone()
return param
# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
if bias == "none":
to_return = {k: t for k, t in named_params if "lora_" in k}
elif bias == "all":
to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
elif bias == "lora_only":
to_return = {}
maybe_lora_bias = {}
lora_bias_names = set()
for k, t in named_params:
if "lora_" in k:
to_return[k] = t
bias_name = k.split("lora_")[0] + "bias"
lora_bias_names.add(bias_name)
elif "bias" in k:
maybe_lora_bias[k] = t
        for k, t in maybe_lora_bias.items():
            if k in lora_bias_names:
                to_return[k] = t
else:
raise NotImplementedError
to_return = {k: maybe_zero_3(v, name=k) for k, v in to_return.items()}
return to_return
def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
to_return = {k: t for k, t in named_params if "lora_" not in k}
if require_grad_only:
to_return = {k: t for k, t in to_return.items() if t.requires_grad}
to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
return to_return
def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}
to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
return to_return
def find_all_linear_names(model):
cls = torch.nn.Linear
| lora_module_names = set() |
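For reference, the completion above (`lora_module_names = set()`) initializes the accumulator used by `find_all_linear_names`. A minimal sketch of how this helper commonly continues in LLaVA-style training code follows; it is an assumed pattern, not verified against this repository, and the `lm_head` exclusion is an assumed convention.

    # Sketch of the usual continuation (assumption, LLaVA-style):
    for name, module in model.named_modules():
        if isinstance(module, cls):
            names = name.split('.')
            lora_module_names.add(names[0] if len(names) == 1 else names[-1])
    if 'lm_head' in lora_module_names:   # output head is usually excluded from LoRA targets
        lora_module_names.remove('lm_head')
    return list(lora_module_names)
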
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: moonbow721/DPoser
# Path: lib/algorithms/advanced/sde_lib.py
class SDE(abc.ABC):
class RSDE(self.__class__):
class VPSDE(SDE):
class subVPSDE(SDE):
class VESDE(SDE):
def __init__(self, N):
def T(self):
def sde(self, x, t):
def marginal_prob(self, x, t):
def prior_sampling(self, shape):
def prior_logp(self, z):
def discretize(self, x, t):
def return_alpha_sigma(self, t):
def reverse(self, score_fn, probability_flow=False):
def __init__(self):
def T(self):
def sde(self, x, t, condition=None, mask=None, guide=False):
def discretize(self, x, t, condition=None, mask=None):
def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):
def T(self):
def sde(self, x, t):
def marginal_prob(self, x, t):
def prior_sampling(self, shape):
def prior_logp(self, z):
def discretize(self, x, t):
def return_alpha_sigma(self, t):
def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):
def T(self):
def sde(self, x, t):
def marginal_prob(self, x, t):
def prior_sampling(self, shape):
def prior_logp(self, z):
def return_alpha_sigma(self, t):
def __init__(self, sigma_min=0.01, sigma_max=50, N=1000, T=1):
def T(self):
def sde(self, x, t):
def marginal_prob(self, x, t):
def prior_sampling(self, shape):
def prior_logp(self, z):
def discretize(self, x, t):
def return_alpha_sigma(self, t):
G = diffusion * torch.sqrt(torch.tensor(dt, device=t.device))
N = self.N
T = self.T
N = np.prod(shape[1:])
G = sqrt_beta
N = np.prod(shape[1:])
N = np.prod(shape[1:])
G = torch.sqrt(sigma ** 2 - adjacent_sigma ** 2)
# Path: lib/algorithms/advanced/sampling.py
_CORRECTORS = {}
_PREDICTORS = {}
def register_predictor(cls=None, *, name=None):
def _register(cls):
def register_corrector(cls=None, *, name=None):
def _register(cls):
def get_predictor(name):
def get_corrector(name):
def get_sampling_fn(config, sde, shape, inverse_scaler, eps, device=None):
def __init__(self, sde, score_fn, probability_flow=False):
def update_fn(self, x, t, observation, mask):
def __init__(self, sde, score_fn, snr, n_steps):
def update_fn(self, x, t, observation, mask):
def __init__(self, sde, score_fn, probability_flow=False):
def update_fn(self, x, t, observation, mask):
def update_fn_guide(self, x_t, t, observation, mask, condition=None, grad_step=1.0):
def __init__(self, sde, score_fn, probability_flow=False):
def update_fn(self, x, t):
def __init__(self, sde, score_fn, probability_flow=False):
def vesde_update_fn(self, x, t):
def vpsde_update_fn(self, x, t):
def update_fn(self, x, t):
def __init__(self, sde, score_fn, probability_flow=False):
def update_fn(self, x, t, observation, mask):
def __init__(self, sde, score_fn, snr, n_steps):
def update_fn(self, x, t, observation, mask):
def __init__(self, sde, score_fn, snr, n_steps):
def update_fn(self, x, t, observation, mask):
def __init__(self, sde, score_fn, snr, n_steps):
def update_fn(self, x, t, observation, mask):
def shared_predictor_update_fn(x, t, observation, mask, sde, model, predictor, probability_flow, continuous):
def shared_corrector_update_fn(x, t, observation, mask, sde, model, corrector, continuous, snr, n_steps):
def get_pc_sampler(sde, shape, predictor, corrector, inverse_scaler, snr,
n_steps=1, probability_flow=False, continuous=False,
denoise=True, eps=1e-3, device='cuda'):
def get_imputation_update_fn(update_fn):
def imputation_update_fn(x, vec_t, observation, mask, model, args):
def pc_sampler(model, observation=None, mask=None, z=None, start_step=0, args=None):
def get_ode_sampler(sde, shape, inverse_scaler,
denoise=False, rtol=1e-5, atol=1e-5,
method='RK45', eps=1e-3, device='cuda'):
def denoise_update_fn(model, x):
def drift_fn(model, x, t):
def ode_sampler(model, z=None):
def ode_func(t, x):
class Predictor(abc.ABC):
class Corrector(abc.ABC):
class EulerMaruyamaPredictor(Predictor):
class ReverseDiffusionPredictor(Predictor):
class AncestralSamplingPredictor(Predictor):
class NonePredictor(Predictor):
class LangevinCorrector(Corrector):
class AnnealedLangevinDynamics(Corrector):
class NoneCorrector(Corrector):
# Path: lib/algorithms/advanced/utils.py
_MODELS = {}
def register_model(cls=None, *, name=None):
def _register(cls):
def get_model(name):
def get_sigmas(config):
def get_ddpm_params(config):
def create_model(config):
def get_model_fn(model, train=False):
def model_fn(x, labels, condition, mask):
def get_score_fn(sde, model, train=False, continuous=False):
def score_fn(x, t, condition, mask):
def score_fn(x, t, condition, mask):
def to_flattened_numpy(x):
def from_flattened_numpy(x, shape):
# Path: lib/algorithms/advanced/model.py
class ScoreModelFC(nn.Module):
"""
Independent condition feature projection layers for each block
"""
def __init__(self, config, n_poses=21, pose_dim=6, hidden_dim=64,
embed_dim=32, n_blocks=2):
super(ScoreModelFC, self).__init__()
self.config = config
self.n_poses = n_poses
self.joint_dim = pose_dim
self.n_blocks = n_blocks
self.act = get_act(config)
self.pre_dense = nn.Linear(n_poses * pose_dim, hidden_dim)
self.pre_dense_t = nn.Linear(embed_dim, hidden_dim)
self.pre_dense_cond = nn.Linear(hidden_dim, hidden_dim)
self.pre_gnorm = nn.GroupNorm(32, num_channels=hidden_dim)
self.dropout = nn.Dropout(p=config.model.dropout)
# time embedding
self.time_embedding_type = config.model.embedding_type.lower()
if self.time_embedding_type == 'fourier':
self.gauss_proj = GaussianFourierProjection(embed_dim=embed_dim, scale=config.model.fourier_scale)
elif self.time_embedding_type == 'positional':
self.posit_proj = functools.partial(get_timestep_embedding, embedding_dim=embed_dim)
else:
assert 0
self.shared_time_embed = nn.Sequential(
nn.Linear(embed_dim, embed_dim),
self.act,
)
self.register_buffer('sigmas', torch.tensor(get_sigmas(config), dtype=torch.float))
for idx in range(n_blocks):
setattr(self, f'b{idx + 1}_dense1', nn.Linear(hidden_dim, hidden_dim))
setattr(self, f'b{idx + 1}_dense1_t', nn.Linear(embed_dim, hidden_dim))
setattr(self, f'b{idx + 1}_gnorm1', nn.GroupNorm(32, num_channels=hidden_dim))
setattr(self, f'b{idx + 1}_dense2', nn.Linear(hidden_dim, hidden_dim))
setattr(self, f'b{idx + 1}_dense2_t', nn.Linear(embed_dim, hidden_dim))
setattr(self, f'b{idx + 1}_gnorm2', nn.GroupNorm(32, num_channels=hidden_dim))
self.post_dense = nn.Linear(hidden_dim, n_poses * pose_dim)
def forward(self, batch, t, condition=None, mask=None):
"""
batch: [B, j*3] or [B, j*6]
t: [B]
Return: [B, j*3] or [B, j*6] same dim as batch
"""
bs = batch.shape[0]
# batch = batch.view(bs, -1) # [B, j*3]
# time embedding
if self.time_embedding_type == 'fourier':
# Gaussian Fourier features embeddings.
used_sigmas = t
temb = self.gauss_proj(torch.log(used_sigmas))
elif self.time_embedding_type == 'positional':
# Sinusoidal positional embeddings.
timesteps = t
used_sigmas = self.sigmas[t.long()]
temb = self.posit_proj(timesteps)
else:
raise ValueError(f'time embedding type {self.time_embedding_type} unknown.')
temb = self.shared_time_embed(temb)
h = self.pre_dense(batch)
h += self.pre_dense_t(temb)
h = self.pre_gnorm(h)
h = self.act(h)
h = self.dropout(h)
for idx in range(self.n_blocks):
h1 = getattr(self, f'b{idx + 1}_dense1')(h)
h1 += getattr(self, f'b{idx + 1}_dense1_t')(temb)
h1 = getattr(self, f'b{idx + 1}_gnorm1')(h1)
h1 = self.act(h1)
# dropout, maybe
h1 = self.dropout(h1)
h2 = getattr(self, f'b{idx + 1}_dense2')(h1)
h2 += getattr(self, f'b{idx + 1}_dense2_t')(temb)
h2 = getattr(self, f'b{idx + 1}_gnorm2')(h2)
h2 = self.act(h2)
# dropout, maybe
h2 = self.dropout(h2)
h = h + h2
res = self.post_dense(h) # [B, j*3]
''' normalize the output '''
if self.config.model.scale_by_sigma:
used_sigmas = used_sigmas.reshape((bs, 1))
res = res / used_sigmas
return res
# Path: lib/algorithms/ema.py
class ExponentialMovingAverage:
"""
Maintains (exponential) moving average of a set of parameters.
"""
def __init__(self, parameters, decay=0.999, use_num_updates=True):
"""
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the result of
`model.parameters()`.
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.decay = decay
self.num_updates = 0 if use_num_updates else None
self.shadow_params = [p.clone().detach()
for p in parameters if p.requires_grad]
self.collected_params = []
def update(self, parameters):
"""
Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of
the `optimizer.step()` call.
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the same set of
parameters used to initialize this object.
"""
decay = self.decay
if self.num_updates is not None:
self.num_updates += 1
decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
s_param.sub_(one_minus_decay * (s_param - param))
def copy_to(self, parameters):
"""
Copy current parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages.
"""
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
if param.requires_grad:
param.data.copy_(s_param.data)
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
def state_dict(self):
return dict(decay=self.decay, num_updates=self.num_updates,
shadow_params=self.shadow_params)
def load_state_dict(self, state_dict):
self.decay = state_dict['decay']
self.num_updates = state_dict['num_updates']
self.shadow_params = state_dict['shadow_params']
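To make the intended call pattern of this class concrete, here is a small self-contained usage sketch. The toy model, optimizer, and data are illustrative stand-ins, not code from the repository.

import torch
from lib.algorithms.ema import ExponentialMovingAverage

model = torch.nn.Linear(4, 4)                       # toy stand-in for the score network
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

for _ in range(10):                                 # toy training loop
    loss = model(torch.randn(8, 4)).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
    ema.update(model.parameters())                  # update the averages after each optimizer step

# Evaluate with EMA weights, then restore the live parameters.
ema.store(model.parameters())
ema.copy_to(model.parameters())
with torch.no_grad():
    _ = model(torch.randn(8, 4))
ema.restore(model.parameters())
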
# Path: lib/body_model/constants.py
SMPL_MEAN_PATH = join(curr_dir, './smpl_mean_params.npz')
BEND_POSE_PATH = join(curr_dir, '../data/bend_pose.npz')
CROP_IMG_HEIGHT = 256
CROP_IMG_WIDTH = 192
CROP_ASPECT_RATIO = CROP_IMG_HEIGHT / float(CROP_IMG_WIDTH)
IMG_NORM_MEAN = [0.485, 0.456, 0.406]
IMG_NORM_STD = [0.229, 0.224, 0.225]
FOCAL_LENGTH = 5000.
IMG_RES = 224
JOINT_NAMES = [
# 25 OpenPose joints (in the order provided by OpenPose)
'OP Nose',
'OP Neck',
'OP RShoulder',
'OP RElbow',
'OP RWrist',
'OP LShoulder',
'OP LElbow',
'OP LWrist',
'OP MidHip',
'OP RHip',
'OP RKnee',
'OP RAnkle',
'OP LHip',
'OP LKnee',
'OP LAnkle',
'OP REye',
'OP LEye',
'OP REar',
'OP LEar',
'OP LBigToe',
'OP LSmallToe',
'OP LHeel',
'OP RBigToe',
'OP RSmallToe',
'OP RHeel',
# 24 Ground Truth joints (superset of joints from different datasets)
'Right Ankle',
'Right Knee',
'Right Hip',
'Left Hip',
'Left Knee',
'Left Ankle',
'Right Wrist',
'Right Elbow',
'Right Shoulder',
'Left Shoulder',
'Left Elbow',
'Left Wrist',
'Neck (LSP)',
'Top of Head (LSP)',
'Pelvis (MPII)',
'Thorax (MPII)',
'Spine (H36M)',
'Jaw (H36M)',
'Head (H36M)',
'Nose',
'Left Eye',
'Right Eye',
'Left Ear',
'Right Ear'
]
JOINT_IDS = {JOINT_NAMES[i]: i for i in range(len(JOINT_NAMES))}
JOINT_MAP = {
'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,
'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,
'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,
'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,
'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,
'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,
'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,
'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34,
'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45,
'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7,
'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17,
'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20,
'Neck (LSP)': 47, 'Top of Head (LSP)': 48,
'Pelvis (MPII)': 49, 'Thorax (MPII)': 50,
'Spine (H36M)': 51, 'Jaw (H36M)': 52,
'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26,
'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27
}
H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
H36M_TO_J14 = H36M_TO_J17[:14]
J24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]
J24_TO_J14 = J24_TO_J17[:14]
SMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]
SMPL_POSE_FLIP_PERM = []
J24_FLIP_PERM = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]
J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\
+ [25+i for i in J24_FLIP_PERM]
# Path: lib/body_model/fitting_losses.py
def camera_fitting_loss(model_joints, camera_t, camera_t_est, camera_center, joints_2d, joints_conf,
focal_length=5000, depth_loss_weight=100):
"""
Loss function for camera optimization.
"""
# Project model joints
batch_size = model_joints.shape[0]
rotation = torch.eye(3, device=model_joints.device).unsqueeze(0).expand(batch_size, -1, -1)
projected_joints = perspective_projection(model_joints, rotation, camera_t,
focal_length, camera_center)
op_joints = ['OP RHip', 'OP LHip', 'OP RShoulder', 'OP LShoulder']
op_joints_ind = [constants.JOINT_IDS[joint] for joint in op_joints]
gt_joints = ['Right Hip', 'Left Hip', 'Right Shoulder', 'Left Shoulder']
gt_joints_ind = [constants.JOINT_IDS[joint] for joint in gt_joints]
reprojection_error_op = (joints_2d[:, op_joints_ind] -
projected_joints[:, op_joints_ind]) ** 2
reprojection_error_gt = (joints_2d[:, gt_joints_ind] -
projected_joints[:, gt_joints_ind]) ** 2
# Check if for each example in the batch all 4 OpenPose detections are valid, otherwise use the GT detections
# OpenPose joints are more reliable for this task, so we prefer to use them if possible
is_valid = (joints_conf[:, op_joints_ind].min(dim=-1)[0][:, None, None] > 0).float()
reprojection_loss = (is_valid * reprojection_error_op + (1 - is_valid) * reprojection_error_gt).sum(dim=(1, 2))
# Loss that penalizes deviation from depth estimate
depth_loss = (depth_loss_weight ** 2) * (camera_t[:, 2] - camera_t_est[:, 2]) ** 2
total_loss = reprojection_loss + depth_loss
return total_loss.sum()
# Path: lib/body_model/fitting_losses.py
def body_fitting_loss(body_pose, betas, model_joints, camera_t, camera_center,
joints_2d, joints_conf, pose_prior, quan_t,
focal_length=5000, sigma=100, pose_prior_weight=4.78,
shape_prior_weight=5, angle_prior_weight=15.2,
output='mean', verbose=True):
"""
Loss function for body fitting
"""
batch_size = body_pose.shape[0]
rotation = torch.eye(3, device=body_pose.device).unsqueeze(0).expand(batch_size, -1, -1)
projected_joints = perspective_projection(model_joints, rotation, camera_t,
focal_length, camera_center)
# Weighted robust reprojection error
reprojection_error = gmof(projected_joints - joints_2d, sigma)
reprojection_loss = (joints_conf ** 2) * reprojection_error.sum(dim=-1) # sum along x-y
# Pose prior loss
if pose_prior is not None:
pose_prior_loss = (pose_prior_weight ** 2) * pose_prior(body_pose, betas, quan_t)
else:
pose_prior_loss = 0.0
# Angle prior for knees and elbows
angle_prior_loss = (angle_prior_weight ** 2) * angle_prior(body_pose).sum(dim=-1)
# Regularizer to prevent betas from taking large values
shape_prior_loss = (shape_prior_weight ** 2) * (betas ** 2).sum(dim=-1)
# sum along different joints
total_loss = reprojection_loss.sum(dim=-1) + pose_prior_loss + angle_prior_loss + shape_prior_loss
if verbose:
print(f"Reprojection Loss: {reprojection_loss.sum(dim=-1).mean().item():.2f}")
print(f"Angle Prior Loss: {angle_prior_loss.mean().item():.2f}")
print(f"Shape Prior Loss: {shape_prior_loss.mean().item():.2f}")
if pose_prior is not None:
print(f"Pose Prior Loss: {pose_prior_loss.mean().item():.2f}")
if output == 'sum':
return total_loss.sum()
elif output == 'reprojection':
return reprojection_loss
else:
return total_loss.mean() # mean along batch
# Path: lib/dataset/AMASS.py
N_POSES = 21
# Path: lib/dataset/AMASS.py
class Posenormalizer:
def __init__(self, data_path, device='cuda:0', normalize=True, min_max=True, rot_rep=None):
assert rot_rep in ['rot6d', 'axis']
self.normalize = normalize
self.min_max = min_max
self.rot_rep = rot_rep
normalize_params = torch.load(os.path.join(data_path, '{}_normalize1.pt'.format(rot_rep)))
self.min_poses, self.max_poses = normalize_params['min_poses'].to(device), normalize_params['max_poses'].to(device)
normalize_params = torch.load(os.path.join(data_path, '{}_normalize2.pt'.format(rot_rep)))
self.mean_poses, self.std_poses = normalize_params['mean_poses'].to(device), normalize_params['std_poses'].to(device)
def offline_normalize(self, poses, from_axis=False):
assert len(poses.shape) == 2 or len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]
pose_shape = poses.shape
if from_axis and self.rot_rep == 'rot6d':
poses = axis_angle_to_rot6d(poses.reshape(-1, 3)).reshape(*pose_shape[:-1], -1)
if not self.normalize:
return poses
if self.min_max:
min_poses = self.min_poses.view(1, -1)
max_poses = self.max_poses.view(1, -1)
if len(poses.shape) == 3: # [t, b, data_dim]
min_poses = min_poses.unsqueeze(0)
max_poses = max_poses.unsqueeze(0)
normalized_poses = 2 * (poses - min_poses) / (max_poses - min_poses) - 1
else:
mean_poses = self.mean_poses.view(1, -1)
std_poses = self.std_poses.view(1, -1)
if len(poses.shape) == 3: # [t, b, data_dim]
mean_poses = mean_poses.unsqueeze(0)
std_poses = std_poses.unsqueeze(0)
normalized_poses = (poses - mean_poses) / std_poses
return normalized_poses
def offline_denormalize(self, poses, to_axis=False):
assert len(poses.shape) == 2 or len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]
if not self.normalize:
denormalized_poses = poses
else:
if self.min_max:
min_poses = self.min_poses.view(1, -1)
max_poses = self.max_poses.view(1, -1)
if len(poses.shape) == 3: # [t, b, data_dim]
min_poses = min_poses.unsqueeze(0)
max_poses = max_poses.unsqueeze(0)
denormalized_poses = 0.5 * ((poses + 1) * (max_poses - min_poses) + 2 * min_poses)
else:
mean_poses = self.mean_poses.view(1, -1)
std_poses = self.std_poses.view(1, -1)
if len(poses.shape) == 3: # [t, b, data_dim]
mean_poses = mean_poses.unsqueeze(0)
std_poses = std_poses.unsqueeze(0)
denormalized_poses = poses * std_poses + mean_poses
if to_axis and self.rot_rep == 'rot6d':
pose_shape = denormalized_poses.shape
denormalized_poses = rot6d_to_axis_angle(denormalized_poses.reshape(-1, 6)).reshape(*pose_shape[:-1], -1)
return denormalized_poses
# Path: lib/utils/generic.py
def import_configs(config_path):
module_name, function_name = config_path.rsplit('.', 1)
config_module = importlib.import_module(module_name)
get_config = getattr(config_module, function_name)
config = get_config()
return config
# Path: lib/utils/misc.py
def linear_interpolation(A, B, frames):
alpha = torch.linspace(0, 1, frames, device=A.device)[:, None]
interpolated = (1 - alpha) * A + alpha * B
return interpolated
# Path: run/smplify.py
import math
import torch
from torch import nn
from tqdm import tqdm
from lib.algorithms.advanced import sde_lib, sampling
from lib.algorithms.advanced import utils as mutils
from lib.algorithms.advanced.model import ScoreModelFC
from lib.algorithms.ema import ExponentialMovingAverage
from lib.body_model import constants
from lib.body_model.fitting_losses import camera_fitting_loss, body_fitting_loss
from lib.dataset.AMASS import N_POSES, Posenormalizer
from lib.utils.generic import import_configs
from lib.utils.misc import linear_interpolation
class DPoser(nn.Module):
def __init__(self, batch_size=32, config_path='', args=None):
super().__init__()
self.device = args.device
| self.batch_size = batch_size |
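The completion above continues `DPoser.__init__`. Based only on the imports and signatures listed in this prompt, one plausible (assumed) continuation wires together the config, score network, EMA, and pose normalizer roughly as below; the attribute names `args.data_path` and the config path string are hypothetical, the hyperparameters use the defaults shown above, and the real DPoser code may differ.

        # Assumed continuation, built only from the APIs listed in this prompt.
        config = import_configs(config_path)          # e.g. 'configs.body.get_config' (hypothetical)
        self.config = config
        self.model = ScoreModelFC(config, n_poses=N_POSES, pose_dim=6)
        self.ema = ExponentialMovingAverage(self.model.parameters(), decay=0.999)
        self.sde = sde_lib.VPSDE(beta_min=0.1, beta_max=20, N=1000)
        self.normalizer = Posenormalizer(data_path=args.data_path,   # hypothetical attribute
                                         device=self.device, rot_rep='rot6d')
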
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: insuhan/hyper-attn
# Path: models/attention/flash_attn_triton_for_hyper.py
def _fwd_kernel(
Q,
K,
V,
Bias,
Out,
Lse,
TMP, # NOTE: TMP is a scratchpad buffer to workaround a compiler bug
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_bb,
stride_bh,
stride_bm,
stride_ob,
stride_oh,
stride_om,
nheads,
seqlen_q,
seqlen_k,
seqlen_q_rounded,
headdim,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def _bwd_preprocess_do_o_dot(
Out,
DO,
Delta,
stride_ob,
stride_oh,
stride_om,
stride_dob,
stride_doh,
stride_dom,
nheads,
seqlen_q,
seqlen_q_rounded,
headdim,
BLOCK_M: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
):
def _bwd_store_dx(
dx_ptrs,
dx,
offs_n,
offs_d,
seqlen,
headdim,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
even_headdim,
):
def _bwd_kernel_one_col_block(
start_n,
Q,
K,
V,
Bias,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qm,
stride_kn,
stride_vn,
stride_bm,
stride_dom,
stride_dqm,
stride_dkn,
stride_dvn,
seqlen_q,
seqlen_k,
headdim,
ATOMIC_ADD: tl.constexpr,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def init_to_zero(name):
def _bwd_kernel(
Q,
K,
V,
Bias,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_bb,
stride_bh,
stride_bm,
stride_dob,
stride_doh,
stride_dom,
stride_dqb,
stride_dqh,
stride_dqm,
stride_dkb,
stride_dkh,
stride_dkn,
stride_dvb,
stride_dvh,
stride_dvn,
nheads,
seqlen_q,
seqlen_k,
seqlen_q_rounded,
headdim,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
SEQUENCE_PARALLEL: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
def _flash_attn_backward(
do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None
):
def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
def backward(ctx, do, dlse_use_needed=None):
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
BLOCK = 128
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
class FlashAttnFunc(torch.autograd.Function):
# Path: models/attention/hyper_attn.py
class HyperAttention(torch.nn.Module):
def __init__(self, input_dim=64, lsh_num_projs=7, block_size=256, sample_size=256, min_seq_len=4096, cuda=False):
super().__init__()
self.input_dim = input_dim
self.lsh_num_projs = lsh_num_projs
self.block_size = block_size
self.sample_size = sample_size
self.min_seq_len = min_seq_len
self.cuda = cuda
self.lsh = AngularLSH(num_projs=self.lsh_num_projs, dim=(1, 1, input_dim))
def forward(self, query: torch.tensor, key: torch.tensor, value: torch.tensor, scale=None, causal=False, return_lse=False):
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
n_query = query.shape[2]
batch_size, n_heads, n_key, dim = key.shape
scale = dim ** (-0.5) if scale is None else scale
# Without causal masking
if not causal:
attn, lse = self.forward_no_causal_mask(query, key, value, scale)
# With causal masking
else:
if n_key <= self.min_seq_len:
if self.cuda:
attn, lse = exact_attention_cuda(query, key, value, scale, causal=True)
else:
attn, lse = exact_attention(query, key, value, scale, causal=True)
else:
# If n_query is odd we pad inputs by adding all-zero rows
if n_query % 2:
query = torch.nn.functional.pad(query, (0,0,0,1), mode='constant',value=0.)
key = torch.nn.functional.pad(key, (0,0,0,1), mode='constant',value=0.)
value = torch.nn.functional.pad(value, (0,0,0,1), mode='constant',value=0.)
q_bd = query.view(batch_size, 2*n_heads, query.shape[2]//2, query.shape[-1])
k_bd = key.view(batch_size, 2*n_heads, key.shape[2]//2, key.shape[-1])
v_bd = value.view(batch_size, 2*n_heads, key.shape[2]//2, value.shape[-1])
attn_bd, lse_bd = self.forward(q_bd, k_bd, v_bd, scale, True, True)
if attn_bd.shape[2] not in attn_bd.stride():
attn_bd = attn_bd.contiguous()
attn_bd = attn_bd.view(batch_size, n_heads, -1, dim)
if lse_bd.shape[2] not in lse_bd.stride():
lse_bd = lse_bd.contiguous()
lse_bd = lse_bd.view(batch_size, n_heads, -1, 1)
attn_unmasked, lse_unmasked = self.forward_no_causal_mask(
query[:, :, key.shape[2]//2:, :],
key[:, :, :key.shape[2]//2, :],
value[:, :, :key.shape[2]//2, :], scale)
attn_up, lse_up = attn_bd[:,:,:query.shape[2]//2,:], lse_bd[:,:,:query.shape[2]//2,:]
attn_down, lse_down = add_self_attentions(
attn_bd[:,:,query.shape[2]//2:,:],
lse_bd[:,:,query.shape[2]//2:,:],
attn_unmasked,
lse_unmasked)
attn = torch.cat((attn_up, attn_down), dim=-2)
lse = torch.cat((lse_up, lse_down), dim=-2)
# If n_query was odd exclude the last rows
if n_query % 2:
attn = attn[:,:,:-1,:]
lse = lse[:,:,:-1,:]
if not return_lse:
return attn
else:
return attn, lse
def forward_no_causal_mask(self, query, key, value, scale):
batch_size, head_size, n_query, dim = query.shape
n_key = key.shape[2]
if self.min_seq_len > n_query:
if self.cuda:
return exact_attention_cuda(query, key, value, scale, causal=False)
else:
return exact_attention(query, key, value, scale, causal=False)
# 1. Sorted block-diagonal via sortLSH
_, query_sort_idx = torch.sort(self.lsh.hash(query), dim=2, stable=True) # batch_size x head_size x n
_, key_sort_idx = torch.sort(self.lsh.hash(key), dim=2, stable=True)
query_sort_idx_inv = torch.argsort(query_sort_idx, dim=2, stable=True) # for recovering the row order
key_block_size = self.block_size
query_sorted = indexing(query, query_sort_idx, key_block_size)
key_sorted = indexing(key, key_sort_idx, key_block_size)
value_sorted = indexing(value, key_sort_idx, key_block_size)
if key_block_size > 0:
num_blocks = key_sorted.shape[2] // key_block_size
query_block_size = query_sorted.shape[2] // num_blocks
# Reshape tensors to [batch_size*head_size, 1, block_size, dim] as Flash-attn only allows 4d-tensors
query_split_per_block = query_sorted.view(-1, 1, query_block_size, dim)
key_split_per_block = key_sorted.view(-1, 1, key_block_size, dim)
value_split_per_block = value_sorted.view(-1, 1, key_block_size, dim)
if self.cuda:
attn_block, lse_block = exact_attention_cuda(
query_split_per_block, key_split_per_block, value_split_per_block,
softmax_scale=scale, causal=False)
else:
attn_block, lse_block = exact_attention(
query_split_per_block, key_split_per_block, value_split_per_block,
softmax_scale=scale, causal=False)
if attn_block.shape[2] not in attn_block.stride():
attn_block = attn_block.contiguous()
attn_block = attn_block.view(batch_size, head_size, query_sorted.shape[2], -1)
if lse_block.shape[2] not in lse_block.stride():
lse_block = lse_block.contiguous()
lse_block = lse_block.view(batch_size, head_size, query_sorted.shape[2], -1)
# When inputs are padded, then unpad them
if query_sorted.shape[2] != n_query: #query.shape[2]:
attn_block, lse_block = attn_block[:,:,:n_query,:], lse_block[:,:,:n_query,:]
query_sorted = query_sorted[:,:,:n_query,:]
key_sorted = key_sorted[:,:,:n_key,:]
value_sorted = value_sorted[:,:,:n_key,:]
else:
query_block_size = -1
query_block_size = -1
attn_block, lse_block = 0, 0
# 2. Residual low-rank part via uniform sampling
# Sample indices uniformly at random
sample_size = self.sample_size
if sample_size > 0 and (n_query > query_block_size) and (n_key > key_block_size):
sampled_set = torch.randint(n_key, size=(batch_size, head_size, sample_size), device=query_sorted.device)
# Compute mask for hiding A_ij computed in block-diagonal attention
offset_n = rearrange(torch.arange(n_query, device=query_sorted.device), 'n -> 1 n 1')
weights = n_key / sample_size
value_subset = indexing(value_sorted, sampled_set)
key_subset = indexing(key_sorted, sampled_set)
if not self.cuda:
block_mask = (offset_n // query_block_size) == (sampled_set // key_block_size).view(-1, 1, sample_size)
block_mask = block_mask.view(batch_size, head_size, -1, sample_size)
block_mask = block_mask.to(query_sorted.dtype)
                block_mask *= torch.finfo(query_sorted.dtype).min # adds -inf to QK^T
attn_res, lse_res = exact_attention(query_sorted, key_subset, value_subset, scale, causal=False, bias=block_mask)
else:
attn_res, lse_res = exact_attention_cuda(query_sorted, key_subset, value_subset, scale, causal=False)
lse_res = lse_res + math.log(weights)
# Add two attentions
if key_block_size > 0:
attn, lse = add_self_attentions(attn_block, lse_block, attn_res, lse_res)
else:
attn, lse = attn_res, lse_res
else:
attn, lse = attn_block, lse_block
# Re-order rows with the inverse order for query_sorted -> query
attn = indexing(attn, query_sort_idx_inv)
lse = indexing(lse, query_sort_idx_inv)
return attn, lse
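Before the benchmark script below, a short usage sketch of the HyperAttention module as declared above. Tensor sizes are illustrative only; the import path and argument names come from the listing.

import torch
from models.attention.hyper_attn import HyperAttention

# Layout follows forward(): (batch, heads, seq_len, head_dim).
q = torch.randn(1, 8, 8192, 64, device='cuda', dtype=torch.bfloat16)
k = torch.randn(1, 8, 8192, 64, device='cuda', dtype=torch.bfloat16)
v = torch.randn(1, 8, 8192, 64, device='cuda', dtype=torch.bfloat16)

attn_module = HyperAttention(input_dim=64, lsh_num_projs=7, block_size=256,
                             sample_size=256, min_seq_len=4096,
                             cuda=False)   # False -> bundled triton kernels, True -> flash-attn CUDA kernels
out, lse = attn_module(q, k, v, causal=True, return_lse=True)
print(out.shape)   # torch.Size([1, 8, 8192, 64])
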
# Path: benchmark_single_attention.py
import os
import argparse
import torch
import triton
from tqdm import tqdm
from models.attention.flash_attn_triton_for_hyper import flash_attn_func
from models.attention.hyper_attn import HyperAttention
try:
    from flash_attn import flash_attn_func as flash_attn_func_cuda
except ImportError:
    flash_attn_func_cuda = None
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--no_causal", action="store_true")
parser.add_argument("--mode", type=str, default="fwd+bwd", choices=['fwd', 'bwd', 'fwd+bwd'])
parser.add_argument("--attn_method", type=str, default="flash", choices=['flash', 'flash-cuda', 'hyper', 'hyper-cuda'])
return parser.parse_args()
def get_tensors(batch_size, seq_len, head_size, dim):
q = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
k = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
v = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
return q, k, v
def run_flash_attn(batch_size, head_size, seq_len, dim, causal, mode, impl="triton", warmup=20, rep=100):
q, k, v = get_tensors(batch_size, seq_len, head_size, dim)
if impl == "cuda":
if flash_attn_func_cuda is None:
raise ImportError("Please install flash_attn (pip install flash-attn --no-build-isolation)")
fn = lambda: flash_attn_func_cuda(q, k, v, causal=causal)
else:
fn = lambda: flash_attn_func(q, k, v, None, causal, None)[0]
if mode == 'fwd':
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, percentiles=[0.2, 0.5, 0.8])
elif mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, percentiles=[0.2, 0.5, 0.8])
| else: # mode == 'fwd+bwd' |
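The completion above opens the remaining branch of `run_flash_attn`. A hedged sketch of how the combined forward+backward benchmark is typically written, mirroring the two branches already shown; the repository's exact code may differ.

    else:  # mode == 'fwd+bwd'
        def fwd_bwd():
            o = fn()
            do = torch.randn_like(o)
            o.backward(do, retain_graph=True)
        return triton.testing.do_bench(fwd_bwd, warmup=warmup, rep=rep,
                                       percentiles=[0.2, 0.5, 0.8])
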
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: raven38/EfficientDynamic3DGaussian
# Path: scene/colmap_loader.py
def read_extrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
cameras = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
camera_id = int(elems[0])
model = elems[1]
assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE"
width = int(elems[2])
height = int(elems[3])
params = np.array(tuple(map(float, elems[4:])))
cameras[camera_id] = Camera(id=camera_id, model=model,
width=width, height=height,
params=params)
return cameras
# Path: scene/colmap_loader.py
def qvec2rotmat(qvec):
return np.array([
[1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
[2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
# Path: scene/colmap_loader.py
def read_extrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
format_char_sequence="ddq"*num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::WriteCamerasBinary(const std::string& path)
void Reconstruction::ReadCamerasBinary(const std::string& path)
"""
cameras = {}
with open(path_to_model_file, "rb") as fid:
num_cameras = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_cameras):
camera_properties = read_next_bytes(
fid, num_bytes=24, format_char_sequence="iiQQ")
camera_id = camera_properties[0]
model_id = camera_properties[1]
model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
width = camera_properties[2]
height = camera_properties[3]
num_params = CAMERA_MODEL_IDS[model_id].num_params
params = read_next_bytes(fid, num_bytes=8*num_params,
format_char_sequence="d"*num_params)
cameras[camera_id] = Camera(id=camera_id,
model=model_name,
width=width,
height=height,
params=np.array(params))
assert len(cameras) == num_cameras
return cameras
# Path: scene/colmap_loader.py
def read_points3D_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
void Reconstruction::WritePoints3DBinary(const std::string& path)
"""
with open(path_to_model_file, "rb") as fid:
num_points = read_next_bytes(fid, 8, "Q")[0]
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
for p_id in range(num_points):
binary_point_line_properties = read_next_bytes(
fid, num_bytes=43, format_char_sequence="QdddBBBd")
xyz = np.array(binary_point_line_properties[1:4])
rgb = np.array(binary_point_line_properties[4:7])
error = np.array(binary_point_line_properties[7])
track_length = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q")[0]
track_elems = read_next_bytes(
fid, num_bytes=8*track_length,
format_char_sequence="ii"*track_length)
xyzs[p_id] = xyz
rgbs[p_id] = rgb
errors[p_id] = error
return xyzs, rgbs, errors
# Path: scene/colmap_loader.py
def read_points3D_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DText(const std::string& path)
void Reconstruction::WritePoints3DText(const std::string& path)
"""
xyzs = None
rgbs = None
errors = None
num_points = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
num_points += 1
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
count = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
xyz = np.array(tuple(map(float, elems[1:4])))
rgb = np.array(tuple(map(int, elems[4:7])))
error = np.array(float(elems[7]))
xyzs[count] = xyz
rgbs[count] = rgb
errors[count] = error
count += 1
return xyzs, rgbs, errors
# Path: utils/graphics_utils.py
def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
Rt = np.zeros((4, 4))
Rt[:3, :3] = R.transpose()
Rt[:3, 3] = t
Rt[3, 3] = 1.0
C2W = np.linalg.inv(Rt)
cam_center = C2W[:3, 3]
cam_center = (cam_center + translate) * scale
C2W[:3, 3] = cam_center
Rt = np.linalg.inv(C2W)
return np.float32(Rt)
# Path: utils/graphics_utils.py
def focal2fov(focal, pixels):
return 2*math.atan(pixels/(2*focal))
# Path: utils/graphics_utils.py
def fov2focal(fov, pixels):
return pixels / (2 * math.tan(fov / 2))
# Path: utils/sh_utils.py
def SH2RGB(sh):
return sh * C0 + 0.5
# Path: scene/gaussian_model.py
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
def __init__(self, sh_degree : int, L: int):
def capture(self):
def restore(self, model_args, training_args):
def get_scaling(self):
def get_rotation(self):
def get_xyz(self):
def get_features(self):
def get_opacity(self):
def get_covariance(self, scaling_modifier = 1):
def oneupSHdegree(self):
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
def training_setup(self, training_args):
def update_learning_rate(self, iteration):
def construct_list_of_attributes(self):
def save_ply(self, path):
def reset_opacity(self):
def load_ply(self, path):
def replace_tensor_to_optimizer(self, tensor, name):
def _prune_optimizer(self, mask):
def prune_points(self, mask):
def cat_tensors_to_optimizer(self, tensors_dict):
def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
def densify_and_clone(self, grads, grad_threshold, scene_extent):
def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
def add_densification_stats(self, viewspace_point_tensor, update_filter):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
# Path: scene/hyper_camera.py
class Camera:
"""Class to handle camera geometry."""
def __init__(self,
orientation: np.ndarray,
position: np.ndarray,
focal_length: Union[np.ndarray, float],
principal_point: np.ndarray,
image_size: np.ndarray,
skew: Union[np.ndarray, float] = 0.0,
pixel_aspect_ratio: Union[np.ndarray, float] = 1.0,
radial_distortion: Optional[np.ndarray] = None,
tangential_distortion: Optional[np.ndarray] = None,
dtype=np.float32):
"""Constructor for camera class."""
if radial_distortion is None:
radial_distortion = np.array([0.0, 0.0, 0.0], dtype)
if tangential_distortion is None:
tangential_distortion = np.array([0.0, 0.0], dtype)
self.orientation = np.array(orientation, dtype)
self.position = np.array(position, dtype)
self.focal_length = np.array(focal_length, dtype)
self.principal_point = np.array(principal_point, dtype)
self.skew = np.array(skew, dtype)
self.pixel_aspect_ratio = np.array(pixel_aspect_ratio, dtype)
self.radial_distortion = np.array(radial_distortion, dtype)
self.tangential_distortion = np.array(tangential_distortion, dtype)
self.image_size = np.array(image_size, np.uint32)
self.dtype = dtype
@classmethod
def from_json(cls, path: PathType):
"""Loads a JSON camera into memory."""
with open(path, 'r') as fp:
camera_json = json.load(fp)
# Fix old camera JSON.
if 'tangential' in camera_json:
camera_json['tangential_distortion'] = camera_json['tangential']
return cls(
orientation=np.asarray(camera_json['orientation']),
position=np.asarray(camera_json['position']),
focal_length=camera_json['focal_length'],
principal_point=np.asarray(camera_json['principal_point']),
skew=camera_json['skew'],
pixel_aspect_ratio=camera_json['pixel_aspect_ratio'],
radial_distortion=np.asarray(camera_json['radial_distortion']),
tangential_distortion=np.asarray(camera_json['tangential_distortion']),
image_size=np.asarray(camera_json['image_size']),
)
def to_json(self):
return {
k: (v.tolist() if hasattr(v, 'tolist') else v)
for k, v in self.get_parameters().items()
}
def get_parameters(self):
return {
'orientation': self.orientation,
'position': self.position,
'focal_length': self.focal_length,
'principal_point': self.principal_point,
'skew': self.skew,
'pixel_aspect_ratio': self.pixel_aspect_ratio,
'radial_distortion': self.radial_distortion,
'tangential_distortion': self.tangential_distortion,
'image_size': self.image_size,
}
@property
def scale_factor_x(self):
return self.focal_length
@property
def scale_factor_y(self):
return self.focal_length * self.pixel_aspect_ratio
@property
def principal_point_x(self):
return self.principal_point[0]
@property
def principal_point_y(self):
return self.principal_point[1]
@property
def has_tangential_distortion(self):
return any(self.tangential_distortion != 0.0)
@property
def has_radial_distortion(self):
return any(self.radial_distortion != 0.0)
@property
def image_size_y(self):
return self.image_size[1]
@property
def image_size_x(self):
return self.image_size[0]
@property
def image_shape(self):
return self.image_size_y, self.image_size_x
@property
def optical_axis(self):
return self.orientation[2, :]
@property
def translation(self):
return -np.matmul(self.orientation, self.position)
def pixel_to_local_rays(self, pixels: np.ndarray):
"""Returns the local ray directions for the provided pixels."""
y = ((pixels[..., 1] - self.principal_point_y) / self.scale_factor_y)
x = ((pixels[..., 0] - self.principal_point_x - y * self.skew) /
self.scale_factor_x)
if self.has_radial_distortion or self.has_tangential_distortion:
x, y = _radial_and_tangential_undistort(
x,
y,
k1=self.radial_distortion[0],
k2=self.radial_distortion[1],
k3=self.radial_distortion[2],
p1=self.tangential_distortion[0],
p2=self.tangential_distortion[1])
dirs = np.stack([x, y, np.ones_like(x)], axis=-1)
return dirs / np.linalg.norm(dirs, axis=-1, keepdims=True)
def pixels_to_rays(self, pixels: np.ndarray) -> np.ndarray:
"""Returns the rays for the provided pixels.
Args:
pixels: [A1, ..., An, 2] tensor or np.array containing 2d pixel positions.
Returns:
An array containing the normalized ray directions in world coordinates.
"""
if pixels.shape[-1] != 2:
raise ValueError('The last dimension of pixels must be 2.')
if pixels.dtype != self.dtype:
raise ValueError(f'pixels dtype ({pixels.dtype!r}) must match camera '
f'dtype ({self.dtype!r})')
batch_shape = pixels.shape[:-1]
pixels = np.reshape(pixels, (-1, 2))
local_rays_dir = self.pixel_to_local_rays(pixels)
rays_dir = np.matmul(self.orientation.T, local_rays_dir[..., np.newaxis])
rays_dir = np.squeeze(rays_dir, axis=-1)
# Normalize rays.
rays_dir /= np.linalg.norm(rays_dir, axis=-1, keepdims=True)
rays_dir = rays_dir.reshape((*batch_shape, 3))
return rays_dir
def pixels_to_points(self, pixels: np.ndarray, depth: np.ndarray):
rays_through_pixels = self.pixels_to_rays(pixels)
cosa = np.matmul(rays_through_pixels, self.optical_axis)
points = (
rays_through_pixels * depth[..., np.newaxis] / cosa[..., np.newaxis] +
self.position)
return points
def points_to_local_points(self, points: np.ndarray):
translated_points = points - self.position
local_points = (np.matmul(self.orientation, translated_points.T)).T
return local_points
def project(self, points: np.ndarray):
"""Projects a 3D point (x,y,z) to a pixel position (x,y)."""
batch_shape = points.shape[:-1]
points = points.reshape((-1, 3))
local_points = self.points_to_local_points(points)
# Get normalized local pixel positions.
x = local_points[..., 0] / local_points[..., 2]
y = local_points[..., 1] / local_points[..., 2]
r2 = x**2 + y**2
# Apply radial distortion.
distortion = 1.0 + r2 * (
self.radial_distortion[0] + r2 *
(self.radial_distortion[1] + self.radial_distortion[2] * r2))
# Apply tangential distortion.
x_times_y = x * y
x = (
x * distortion + 2.0 * self.tangential_distortion[0] * x_times_y +
self.tangential_distortion[1] * (r2 + 2.0 * x**2))
y = (
y * distortion + 2.0 * self.tangential_distortion[1] * x_times_y +
self.tangential_distortion[0] * (r2 + 2.0 * y**2))
# Map the distorted ray to the image plane and return the depth.
pixel_x = self.focal_length * x + self.skew * y + self.principal_point_x
pixel_y = (self.focal_length * self.pixel_aspect_ratio * y
+ self.principal_point_y)
pixels = np.stack([pixel_x, pixel_y], axis=-1)
return pixels.reshape((*batch_shape, 2))
def get_pixel_centers(self):
"""Returns the pixel centers."""
xx, yy = np.meshgrid(np.arange(self.image_size_x, dtype=self.dtype),
np.arange(self.image_size_y, dtype=self.dtype))
return np.stack([xx, yy], axis=-1) + 0.5
def scale(self, scale: float):
"""Scales the camera."""
if scale <= 0:
raise ValueError('scale needs to be positive.')
new_camera = Camera(
orientation=self.orientation.copy(),
position=self.position.copy(),
focal_length=self.focal_length * scale,
principal_point=self.principal_point.copy() * scale,
skew=self.skew,
pixel_aspect_ratio=self.pixel_aspect_ratio,
radial_distortion=self.radial_distortion.copy(),
tangential_distortion=self.tangential_distortion.copy(),
image_size=np.array((int(round(self.image_size[0] * scale)),
int(round(self.image_size[1] * scale)))),
)
return new_camera
def look_at(self, position, look_at, up, eps=1e-6):
"""Creates a copy of the camera which looks at a given point.
Copies the provided vision_sfm camera and returns a new camera that is
positioned at `camera_position` while looking at `look_at_position`.
Camera intrinsics are copied by this method. A common value for the
up_vector is (0, 1, 0).
Args:
position: A (3,) numpy array representing the position of the camera.
look_at: A (3,) numpy array representing the location the camera
looks at.
up: A (3,) numpy array representing the up direction, whose
projection is parallel to the y-axis of the image plane.
eps: a small number to prevent divides by zero.
Returns:
A new camera that is copied from the original but is positioned and
looks at the provided coordinates.
Raises:
ValueError: If the camera position and look at position are very close
to each other or if the up-vector is parallel to the requested optical
axis.
"""
look_at_camera = self.copy()
optical_axis = look_at - position
norm = np.linalg.norm(optical_axis)
if norm < eps:
raise ValueError('The camera center and look at position are too close.')
optical_axis /= norm
right_vector = np.cross(optical_axis, up)
norm = np.linalg.norm(right_vector)
if norm < eps:
raise ValueError('The up-vector is parallel to the optical axis.')
right_vector /= norm
# The three directions here are orthogonal to each other and form a right
# handed coordinate system.
camera_rotation = np.identity(3)
camera_rotation[0, :] = right_vector
camera_rotation[1, :] = np.cross(optical_axis, right_vector)
camera_rotation[2, :] = optical_axis
look_at_camera.position = position
look_at_camera.orientation = camera_rotation
return look_at_camera
def crop_image_domain(
self, left: int = 0, right: int = 0, top: int = 0, bottom: int = 0):
"""Returns a copy of the camera with adjusted image bounds.
Args:
left: number of pixels by which to reduce (or augment, if negative) the
image domain at the associated boundary.
right: likewise.
top: likewise.
bottom: likewise.
The crop parameters may not cause the camera image domain dimensions to
become non-positive.
Returns:
A camera with adjusted image dimensions. The focal length is unchanged,
and the principal point is updated to preserve the original principal
axis.
"""
crop_left_top = np.array([left, top])
crop_right_bottom = np.array([right, bottom])
new_resolution = self.image_size - crop_left_top - crop_right_bottom
new_principal_point = self.principal_point - crop_left_top
if np.any(new_resolution <= 0):
raise ValueError('Crop would result in non-positive image dimensions.')
new_camera = self.copy()
new_camera.image_size = np.array([int(new_resolution[0]),
int(new_resolution[1])])
new_camera.principal_point = np.array([new_principal_point[0],
new_principal_point[1]])
return new_camera
def copy(self):
return copy.deepcopy(self)
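# Illustrative only, not part of the original file: a minimal sketch of how this Camera API fits
# together, assuming a 512x512 pinhole camera with zero distortion. The helper name and example
# values are assumptions; the constructor keywords simply mirror the ones used in `scale()` above.
def _example_camera_round_trip():
    import numpy as np
    cam = Camera(
        orientation=np.eye(3), position=np.zeros(3),
        focal_length=500.0, principal_point=np.array([256.0, 256.0]),
        skew=0.0, pixel_aspect_ratio=1.0,
        radial_distortion=np.zeros(3), tangential_distortion=np.zeros(2),
        image_size=np.array([512, 512]))
    pixels = cam.get_pixel_centers()                     # (512, 512, 2) pixel centres at +0.5
    rays = cam.pixels_to_rays(pixels)                    # (512, 512, 3) unit ray directions in world space
    points = cam.pixels_to_points(pixels, np.ones(pixels.shape[:-1]))  # back-project at depth 1
    looking = cam.look_at(position=np.array([0.0, 0.0, 4.0]),
                          look_at=np.zeros(3), up=np.array([0.0, 1.0, 0.0]))  # repositioned copy, same intrinsics
    return cam.project(points)                           # approximately `pixels` again (round trip)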
# Path: scene/dataset_readers.py
import torch
import os
import sys
import glob
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from scene.hyper_camera import Camera as HyperNeRFCamera
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
| width: int |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zd11024/NaviLLM
# Path: tasks/datasets/mp3d_envs.py
class EnvBatch(object):
def __init__(self, connectivity_dir, feat_db=None, batch_size=1):
self.feat_db = feat_db
self.image_w = 640
self.image_h = 480
self.vfov = 60
self.sims = []
for i in range(batch_size):
sim = MatterSim.Simulator()
sim.setNavGraphPath(connectivity_dir)
sim.setRenderingEnabled(False)
sim.setDiscretizedViewingAngles(True) # Set increment/decrement to 30 degree. (otherwise by radians)
sim.setCameraResolution(self.image_w, self.image_h)
sim.setCameraVFOV(math.radians(self.vfov))
sim.setBatchSize(1)
sim.initialize()
self.sims.append(sim)
def newEpisodes(self, scanIds, viewpointIds, headings):
for i, (scanId, viewpointId, heading) in enumerate(zip(scanIds, viewpointIds, headings)):
self.sims[i].newEpisode([scanId], [viewpointId], [heading], [0])
def getStates(self):
"""
Get list of states augmented with precomputed image features. rgb field will be empty.
Agent's current view [0-35] (set only when viewing angles are discretized)
[0-11] looking down, [12-23] looking at horizon, [24-35] looking up
:return: [ ((36, 2048), sim_state) ] * batch_size
"""
feature_states = []
for i, sim in enumerate(self.sims):
state = sim.getState()[0]
if self.feat_db is None:
feature = None
else:
feature = self.feat_db.get_image_feature(state.scanId, state.location.viewpointId)
feature_states.append((feature, state))
return feature_states
def makeActions(self, actions):
''' Take an action using the full state dependent action interface (with batched input).
Every action element should be an (index, heading, elevation) tuple. '''
for i, (index, heading, elevation) in enumerate(actions):
self.sims[i].makeAction([index], [heading], [elevation])
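# Illustrative only, not from the repository: the discretized view-index convention described in
# `getStates` above ([0-11] looking down, [12-23] at the horizon, [24-35] looking up, 30-degree
# steps). The helper name is hypothetical; the same mapping is used later in `MP3DDataset.make_candidate`.
def _example_view_index_to_angles(view_index: int):
    import math
    heading = (view_index % 12) * math.radians(30)          # 12 headings per elevation ring
    elevation = (view_index // 12 - 1) * math.radians(30)   # -30 (down), 0 (horizon), +30 (up)
    return heading, elevation                               # e.g. 14 -> (math.radians(60), 0.0)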
# Path: tasks/datasets/mp3d_envs.py
def new_simulator(connectivity_dir):
# Simulator image parameters
WIDTH = 640
HEIGHT = 480
VFOV = 60
sim = MatterSim.Simulator()
sim.setNavGraphPath(connectivity_dir)
sim.setRenderingEnabled(False)
sim.setCameraResolution(WIDTH, HEIGHT)
sim.setCameraVFOV(math.radians(VFOV))
sim.setDiscretizedViewingAngles(True)
sim.setBatchSize(1)
sim.initialize()
return sim
# Path: tasks/datasets/mp3d_envs.py
def angle_feature(heading, elevation, angle_feat_size):
return np.array(
[math.sin(heading), math.cos(heading),
math.sin(elevation), math.cos(elevation)] * (angle_feat_size // 4),
dtype=np.float32)
# Path: tasks/datasets/mp3d_envs.py
def get_all_point_angle_feature(sim, angle_feat_size):
return [get_point_angle_feature(sim, angle_feat_size, baseViewId) for baseViewId in range(36)]
# Path: tasks/datasets/mp3d_envs.py
def load_nav_graphs(connectivity_dir, scans):
''' Load connectivity graph for each scan '''
def distance(pose1, pose2):
''' Euclidean distance between two graph poses '''
return ((pose1['pose'][3] - pose2['pose'][3]) ** 2 \
+ (pose1['pose'][7] - pose2['pose'][7]) ** 2 \
+ (pose1['pose'][11] - pose2['pose'][11]) ** 2) ** 0.5
graphs = {}
for scan in scans:
with open(os.path.join(connectivity_dir, '%s_connectivity.json' % scan)) as f:
G = nx.Graph()
positions = {}
data = json.load(f)
for i, item in enumerate(data):
if item['included']:
for j, conn in enumerate(item['unobstructed']):
if conn and data[j]['included']:
positions[item['image_id']] = np.array([item['pose'][3],
item['pose'][7], item['pose'][11]]);
assert data[j]['unobstructed'][i], 'Graph should be undirected'
G.add_edge(item['image_id'], data[j]['image_id'], weight=distance(item, data[j]))
nx.set_node_attributes(G, values=positions, name='position')
graphs[scan] = G
return graphs
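# Illustrative only (hypothetical `connectivity_dir` / `scans` variables): the graphs returned
# above are plain networkx graphs, so per-scan shortest paths and distances can be precomputed
# exactly as `MP3DDataset._load_nav_graphs` does further below, e.g.
# graphs = load_nav_graphs(connectivity_dir, scans)
# shortest_paths = {scan: dict(nx.all_pairs_dijkstra_path(G)) for scan, G in graphs.items()}
# shortest_distances = {scan: dict(nx.all_pairs_dijkstra_path_length(G)) for scan, G in graphs.items()}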
# Path: tools/evaluation/bleu/bleu.py
class Bleu:
def __init__(self, n=4):
# default compute Blue score up to 4
self._n = n
self._hypo_for_image = {}
self.ref_for_image = {}
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
bleu_scorer = BleuScorer(n=self._n)
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) >= 1)
bleu_scorer += (hypo[0], ref)
# score, scores = bleu_scorer.compute_score(option='shortest')
score, scores = bleu_scorer.compute_score(option='closest', verbose=0)
# score, scores = bleu_scorer.compute_score(option='average', verbose=1)
return score, scores
def __str__(self):
return 'BLEU'
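# Illustrative only (hypothetical ids and sentences): per the sanity checks above, `res` maps each
# id to a single-element list with the candidate, and `gts` maps the same id to one or more
# tokenized reference sentences, e.g.
# gts = {'0': ['walk past the sofa and stop at the door']}
# res = {'0': ['walk past the couch and stop at the door']}
# score, scores = Bleu(n=4).compute_score(gts, res)   # corpus BLEU-1..4 plus per-id scores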
# Path: tools/evaluation/rouge/rouge.py
class Rouge():
'''
Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set
'''
def __init__(self):
# vrama91: updated the value below based on discussion with Hovey
self.beta = 1.2
def calc_score(self, candidate, refs):
"""
Compute ROUGE-L score given one candidate and references for an image
:param candidate: str : candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences for the particular image to be evaluated
        :returns score: float (ROUGE-L score for the candidate evaluated against references)
"""
assert (len(candidate) == 1)
assert (len(refs) > 0)
prec = []
rec = []
# split into tokens
token_c = candidate[0].split(" ")
for reference in refs:
# split into tokens
token_r = reference.split(" ")
# compute the longest common subsequence
lcs = my_lcs(token_r, token_c)
prec.append(lcs / float(len(token_c)))
rec.append(lcs / float(len(token_r)))
prec_max = max(prec)
rec_max = max(rec)
if (prec_max != 0 and rec_max != 0):
score = ((1 + self.beta ** 2) * prec_max * rec_max) / float(rec_max + self.beta ** 2 * prec_max)
else:
score = 0.0
return score
def compute_score(self, gts, res):
"""
Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
        :param gts: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
        :param res: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
"""
assert (gts.keys() == res.keys())
imgIds = gts.keys()
score = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
score.append(self.calc_score(hypo, ref))
# Sanity check.
assert (type(hypo) is list)
assert (len(hypo) == 1)
assert (type(ref) is list)
assert (len(ref) > 0)
average_score = np.mean(np.array(score))
return average_score, np.array(score)
def __str__(self):
return 'ROUGE'
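# Worked sketch, not from the repository: for candidate ['walk to the door'] and reference
# ['walk towards the door'], the longest common subsequence has 3 tokens, so precision and recall
# are both 3/4, and with beta = 1.2 the ROUGE-L score is
# ((1 + 1.44) * 0.75 * 0.75) / (0.75 + 1.44 * 0.75) = 0.75.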
# Path: tools/evaluation/cider/cider.py
class Cider:
"""
Main Class to compute the CIDEr metric
"""
def __init__(self, gts=None, n=4, sigma=6.0):
# set cider to sum over 1 to 4-grams
self._n = n
# set the standard deviation parameter for gaussian penalty
self._sigma = sigma
self.doc_frequency = None
self.ref_len = None
if gts is not None:
tmp_cider = CiderScorer(gts, n=self._n, sigma=self._sigma)
self.doc_frequency = tmp_cider.doc_frequency
self.ref_len = tmp_cider.ref_len
def compute_score(self, gts, res):
"""
Main function to compute CIDEr score
        :param  gts (dict) : dictionary with key <image> and value <tokenized reference sentence>
                res (dict) : dictionary with key <image> and value <tokenized hypothesis / candidate sentence>
:return: cider (float) : computed CIDEr score for the corpus
"""
assert(gts.keys() == res.keys())
cider_scorer = CiderScorer(gts, test=res, n=self._n, sigma=self._sigma, doc_frequency=self.doc_frequency,
ref_len=self.ref_len)
return cider_scorer.compute_score()
def __str__(self):
return 'CIDEr'
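# Illustrative only (hypothetical `train_gts`, `gts`, `res` dicts): passing a corpus to the
# constructor precomputes the n-gram document frequencies once, so repeated compute_score calls
# reuse them, e.g.
# cider = Cider(gts=train_gts, n=4, sigma=6.0)
# score, scores = cider.compute_score(gts, res)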
# Path: tasks/datasets/mp3d_dataset.py
class MP3DDataset(BaseDataset):
def __init__(
self,
args,
config,
training=False,
logger=None,
source=None,
):
super().__init__()
self.config = config
self.angle_feat_size = self.config.angle_feat_size
self.logger = logger
self.training = training
self.debug = args.debug
self.source = source
if self.training:
self.split = "train"
self.max_objects = self.config.max_objects
self.multi_endpoints = True
else:
self.split = args.validation_split
self.max_objects = None
self.multi_endpoints = False
self.batch_size = args.batch_size
self.seed = args.seed
self.feat_db = None
self.obj_feat_db = None
# connectivity graph
self.connectivity_dir = str(args.data_dir/'connectivity')
# load mp3d dataset
msg = self._load_data(config, args.data_dir)
self.buffered_state_dict = {}
# simulator
self.sim = new_simulator(self.connectivity_dir)
# angle features
self.angle_feature = get_all_point_angle_feature(self.sim, self.angle_feat_size)
# navigation graph
self._load_nav_graphs()
if logger is not None:
logger.info('[INFO] %s loaded with %d instructions, using splits: %s' % (
self.__class__.__name__, len(self.alldata), self.split))
logger.info(msg)
del self.data
def init_feat_db(self, feat_db, obj_feat_db=None):
self.feat_db = feat_db
self.obj_feat_db = obj_feat_db
def _load_data(self, config, data_dir):
self.data = dict()
self.alldata = []
msg = ""
if self.source == "R2R":
anno_file = get_anno_file_path(data_dir, config.R2R.DIR, config.R2R.SPLIT[self.split])
self.data['r2r'], self.gt_trajs = self.load_data(anno_file=anno_file, debug=self.debug)
msg += '\n- Dataset: load {} R2R samples'.format(len(self.data['r2r']))
elif self.source == "REVERIE":
anno_file = get_anno_file_path(data_dir, config.REVERIE.DIR, config.REVERIE.SPLIT[self.split])
bbox_file = get_anno_file_path(data_dir, config.REVERIE.DIR, config.REVERIE.bbox_file)
obj2vps = self.load_obj2vps(bbox_file)
self.data['reverie'], self.gt_trajs = self.load_data(anno_file=anno_file, obj2vps=obj2vps, debug=self.debug)
msg += '\n- Dataset: load {} REVERIE samples'.format(len(self.data['reverie']))
elif self.source == "CVDN":
anno_file = get_anno_file_path(data_dir, config.CVDN.DIR, config.CVDN.SPLIT[self.split])
self.data['cvdn'], self.gt_trajs = self.load_data(anno_file=anno_file, debug=self.debug)
msg += '\n- Dataset: load {} CVDN samples'.format(len(self.data['cvdn']))
elif self.source == "SOON":
anno_file = get_anno_file_path(data_dir, config.SOON.DIR, config.SOON.SPLIT[self.split])
self.data['soon'], self.gt_trajs = self.load_data(anno_file=anno_file, debug=self.debug)
msg += '\n- Dataset: load {} SOON samples'.format(len(self.data['soon']))
elif self.source == "R2R_AUG":
anno_file = get_anno_file_path(data_dir, config.R2R_AUG.DIR, config.R2R_AUG.SPLIT[self.split])
self.data["r2r_aug"], _ = self.load_data(anno_file=anno_file, debug=self.debug)
elif self.source == "REVERIE_AUG":
anno_file = get_anno_file_path(data_dir, config.REVERIE_AUG.DIR, config.REVERIE_AUG.SPLIT[self.split])
bbox_file = get_anno_file_path(data_dir, config.REVERIE.DIR, config.REVERIE.bbox_file)
obj2vps = self.load_obj2vps(bbox_file)
self.data["reverie_aug"], _ = self.load_data(anno_file=anno_file, obj2vps=obj2vps, debug=self.debug)
elif self.source == "EQA":
anno_file = get_anno_file_path(data_dir, config.EQA.DIR, config.EQA.SPLIT[self.split])
self.data['eqa'], self.gt_trajs = self.load_data(anno_file=anno_file, split=self.split, debug=self.debug)
else:
print("Dataset Source: {}".format(self.source))
raise NotImplementedError
for key, value in self.data.items():
self.alldata += value
msg += '\n- Dataset: load {} split: {} samples in total'.format(self.split, len(self.alldata))
self.scans = set([x['scan'] for x in self.alldata])
msg += '\n- Dataset: load {} split: {} scans in total'.format(self.split, len(self.scans))
return msg
def _load_nav_graphs(self):
"""
        Load the connectivity graph for each scan in self.scans (useful for reasoning about shortest paths).
        Store the graph {scan_id: graph} in self.graphs
        Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.shortest_paths
        Store the distances in self.shortest_distances (structure as above).
:return: None
"""
# print('Loading navigation graphs for %d scans' % len(self.scans))
self.graphs = load_nav_graphs(self.connectivity_dir, self.scans)
self.shortest_paths = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.shortest_paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.shortest_distances = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.shortest_distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def __len__(self):
return len(self.alldata)
def __getitem__(self, index):
item = copy.deepcopy(self.alldata[index])
item = self.preprocess_item(item)
data_type = item['data_type']
scan = item['scan']
instr_id = item['instr_id']
scanIds = [scan]
viewpointIds = [item['path'][0]]
headings = [item['heading']]
env = EnvBatch(connectivity_dir=self.connectivity_dir, batch_size=1)
env.newEpisodes(scanIds, viewpointIds, headings)
observations = self.get_obs(items=[item], env=env, data_type=data_type)[0]
data_dict = {
'sample_idx': index,
'instr_id': instr_id,
'observations': observations,
'env': env,
'item': item,
'data_type': data_type,
}
return data_dict
def preprocess_item(self, item):
return item
@staticmethod
def collate_batch(batch_list, _unused=False):
data_dict = defaultdict(list)
for cur_sample in batch_list:
for key, val in cur_sample.items():
data_dict[key].append(val)
batch_size = len(batch_list)
ret = {}
for key, val in data_dict.items():
try:
if key in ['NotImplemented']:
ret[key] = torch.stack(val, 0)
else:
ret[key] = val
except:
print('Error in collate_batch: key=%s' % key)
raise TypeError
ret['batch_size'] = batch_size
return ret
def get_object_info(self, item):
raise NotImplementedError
def get_obs(self, items, env, data_type=None):
obs = []
for i, (feature, state) in enumerate(env.getStates()):
item = items[i]
base_view_id = state.viewIndex
if feature is None:
feature = self.feat_db.get_image_feature(state.scanId, state.location.viewpointId)
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
# [visual_feature, angle_feature] for views
feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
ob = {
'instr_id': item['instr_id'],
'scan': state.scanId,
'viewpoint': state.location.viewpointId,
'viewIndex': state.viewIndex,
'position': (state.location.x, state.location.y, state.location.z),
'heading': state.heading,
'elevation': state.elevation,
'feature': feature,
'candidate': candidate,
'navigableLocations': state.navigableLocations,
'instruction': item['instruction'],
# 'instr_encoding': item['instr_encoding'],
'gt_path': item['path'],
'path_id': item['path_id'],
}
if 'fg_instruction' in item:
ob.update({
'fg_instruction': item['fg_instruction'],
'fg_view': item['fg_view'],
})
if self.obj_feat_db is not None:
obj_info = self.get_object_info(item, state)
ob.update(obj_info)
ob['distance'] = 0
else:
# RL reward. The negative distance between the state and the final state
# There are multiple gt end viewpoints on REVERIE.
if False: # ob['instr_id'] in self.gt_trajs:
ob['distance'] = self.shortest_distances[ob['scan']][ob['viewpoint']][item['path'][-1]]
else:
ob['distance'] = 0
obs.append(ob)
return obs
def make_candidate(self, feature, scanId, viewpointId, viewId):
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
base_heading = (viewId % 12) * math.radians(30)
base_elevation = (viewId // 12 - 1) * math.radians(30)
adj_dict = {}
long_id = "%s_%s" % (scanId, viewpointId)
if long_id not in self.buffered_state_dict:
for ix in range(36):
if ix == 0:
self.sim.newEpisode([scanId], [viewpointId], [0], [math.radians(-30)])
elif ix % 12 == 0:
self.sim.makeAction([0], [1.0], [1.0])
else:
self.sim.makeAction([0], [1.0], [0])
state = self.sim.getState()[0]
assert state.viewIndex == ix
# Heading and elevation for the viewpoint center
heading = state.heading - base_heading
elevation = state.elevation - base_elevation
visual_feat = feature[ix]
# get adjacent locations
for j, loc in enumerate(state.navigableLocations[1:]):
                    # if a loc is visible from multiple views, use the closest
                    # view (in angular distance) as its representation
distance = _loc_distance(loc)
                    # Heading and elevation for the loc
loc_heading = heading + loc.rel_heading
loc_elevation = elevation + loc.rel_elevation
angle_feat = angle_feature(loc_heading, loc_elevation, self.angle_feat_size)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'heading': loc_heading,
'elevation': loc_elevation,
"normalized_heading": state.heading + loc.rel_heading,
"normalized_elevation": state.elevation + loc.rel_elevation,
'scanId': scanId,
'viewpointId': loc.viewpointId, # Next viewpoint id
'pointId': ix,
'distance': distance,
'idx': j + 1,
'feature': np.concatenate((visual_feat, angle_feat), -1),
'position': (loc.x, loc.y, loc.z),
}
candidate = list(adj_dict.values())
self.buffered_state_dict[long_id] = [
{key: c[key]
for key in
['normalized_heading', 'normalized_elevation', 'scanId', 'viewpointId',
'pointId', 'idx', 'position']}
for c in candidate
]
return candidate
else:
candidate = self.buffered_state_dict[long_id]
candidate_new = []
for c in candidate:
c_new = c.copy()
ix = c_new['pointId']
visual_feat = feature[ix]
c_new['heading'] = c_new['normalized_heading'] - base_heading
c_new['elevation'] = c_new['normalized_elevation'] - base_elevation
angle_feat = angle_feature(c_new['heading'], c_new['elevation'], self.angle_feat_size)
c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
c_new.pop('normalized_heading')
c_new.pop('normalized_elevation')
candidate_new.append(c_new)
return candidate_new
def get_nearest(self, shortest_distances, goal_id, path):
near_id = path[0]
near_d = shortest_distances[near_id][goal_id]
for item in path:
d = shortest_distances[item][goal_id]
if d < near_d:
near_id = item
near_d = d
return near_id
# Path: tasks/datasets/mp3d_dataset.py
def get_anno_file_path(data_dir, dataset_path, filename):
if dataset_path.startswith('/'):
return Path(dataset_path) / filename
return Path(data_dir) / dataset_path / filename
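# Illustrative only (hypothetical paths): an absolute `dataset_path` bypasses `data_dir`, while a
# relative one is joined underneath it, e.g.
# get_anno_file_path('/data', 'R2R/annotations', 'train.json')  -> /data/R2R/annotations/train.json
# get_anno_file_path('/data', '/mnt/r2r', 'train.json')         -> /mnt/r2r/train.json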
# Path: tasks/datasets/eqa.py
import json
import copy
import torch.utils.data as torch_data
import torch
import math
import numpy as np
import networkx as nx
from pathlib import Path
from collections import defaultdict
from .mp3d_envs import (
EnvBatch, new_simulator, angle_feature,
get_all_point_angle_feature, load_nav_graphs,
)
from tools.evaluation.bleu import Bleu
from tools.evaluation.rouge import Rouge
from tools.evaluation.cider import Cider
from .mp3d_dataset import MP3DDataset, get_anno_file_path
ERROR_MARGIN = 3.0
class EQADataset(MP3DDataset):
name = "eqa"
def __init__(
self,
args,
config,
training=False,
logger=None,
source=None,
):
super().__init__(args, config, training, logger, source)
# answer_vocab
filename = get_anno_file_path(args.data_dir, config.EQA.DIR, config.EQA.ANSWER_VOCAB)
with open(filename) as f:
self.answer_vocab = json.load(f)
def init_feat_db(self, feat_db, obj_feat_db=None):
self.feat_db = feat_db
| self.obj_feat_db = obj_feat_db |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jwebmeister/tacspeak
# Path: tacspeak/__main__.py
def main():
user_settings_path = os.path.join(os.getcwd(), os.path.relpath("tacspeak/user_settings.py"))
user_settings = CommandModule(user_settings_path)
user_settings.load()
try:
DEBUG_MODE = (sys.modules["user_settings"]).DEBUG_MODE
except Exception:
print("Failed to load `tacspeak/user_settings.py` DEBUG_MODE. Using default settings as fallback.")
DEBUG_MODE = False
try:
DEBUG_HEAVY_DUMP_GRAMMAR = (sys.modules["user_settings"]).DEBUG_HEAVY_DUMP_GRAMMAR
except Exception:
print("Failed to load `tacspeak/user_settings.py` DEBUG_HEAVY_DUMP_GRAMMAR. Using default settings as fallback.")
DEBUG_HEAVY_DUMP_GRAMMAR = False
try:
KALDI_ENGINE_SETTINGS = (sys.modules["user_settings"]).KALDI_ENGINE_SETTINGS
except Exception:
print("Failed to load `tacspeak/user_settings.py` KALDI_ENGINE_SETTINGS. Using default settings as fallback.")
KALDI_ENGINE_SETTINGS = {
"listen_key":0x10, # 0x10=SHIFT key, 0x05=X1 mouse button, 0x06=X2 mouse button, see https://learn.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
"listen_key_toggle":0, # 0 for toggle mode off; 1 for toggle mode on; 2 for global toggle on (use VAD); -1 for toggle mode off but allow priority grammar even when key not pressed
"vad_padding_end_ms":250, # ms of required silence after VAD
"auto_add_to_user_lexicon":False, # this requires g2p_en (which isn't installed by default)
"allow_online_pronunciations":False,
# "input_device_index":None, # set to an int to choose a non-default microphone
# "vad_aggressiveness":3, # default aggressiveness of VAD
# "vad_padding_start_ms":150, # default ms of required silence before VAD
# "model_dir":'kaldi_model', # default model directory
# "tmp_dir":None,
# "audio_input_device":None,
# "audio_self_threaded":True,
# "audio_auto_reconnect":True,
# "audio_reconnect_callback":None,
# "retain_dir":None, # set to a writable directory path to retain recognition metadata and/or audio data
# "retain_audio":None, # set to True to retain speech data wave files in the retain_dir (if set)
# "retain_metadata":None,
# "retain_approval_func":None,
# "vad_complex_padding_end_ms":600, # default ms of required silence after VAD for complex utterances
# "lazy_compilation":True, # set to True to parallelize & speed up loading
# "invalidate_cache":False,
# "expected_error_rate_threshold":None,
# "alternative_dictation":None,
# "compiler_init_config":None,
# "decoder_init_config":None,
}
def log_handlers():
log_file_path = os.path.join(os.getcwd(), ".tacspeak.log")
log_file_handler = logging.FileHandler(log_file_path)
log_file_formatter = logging.Formatter("%(asctime)s %(name)s (%(levelname)s): %(message)s")
log_file_handler.setFormatter(log_file_formatter)
log_stream_handler = logging.StreamHandler()
log_stream_formatter = logging.Formatter("%(name)s (%(levelname)s): %(message)s")
log_stream_handler.setFormatter(log_stream_formatter)
return [log_stream_handler, log_file_handler]
def setup_loggers(use_default_levels=True):
for name, levels in default_levels.items():
stderr_level, file_level = levels
handlers = log_handlers()
if use_default_levels:
handlers[0].setLevel(stderr_level)
handlers[1].setLevel(file_level)
logger = logging.getLogger(name)
logger.addHandler(handlers[0])
logger.addHandler(handlers[1])
logger.setLevel(min(stderr_level, file_level))
logger.propagate = False
if DEBUG_MODE:
setup_loggers(False)
logging.getLogger('grammar.decode').setLevel(20)
logging.getLogger('grammar.begin').setLevel(20)
logging.getLogger('compound').setLevel(20)
logging.getLogger('engine').setLevel(15)
logging.getLogger('kaldi').setLevel(15)
logging.getLogger('kaldi.compiler').setLevel(15)
logging.getLogger('kaldi.wrapper').setLevel(15)
logging.getLogger('action.exec').setLevel(10)
else:
setup_loggers()
# Set any configuration options here as keyword arguments.
# See Kaldi engine documentation for all available options and more info.
engine = get_engine('kaldi',**KALDI_ENGINE_SETTINGS)
# Call connect() now that the engine configuration is set.
engine.connect()
# Load grammars.
grammar_path = os.path.join(os.getcwd(), os.path.relpath("tacspeak/grammar/"))
directory = CommandModuleDirectory(grammar_path)
directory.load()
handlers = log_handlers()
log_recognition = logging.getLogger('on_recognition')
log_recognition.addHandler(handlers[0])
log_recognition.addHandler(handlers[1])
log_recognition.setLevel(20)
# Define recognition callback functions.
def on_begin():
pass
def on_recognition(words, results):
message = f"{results.kaldi_rule} | {' '.join(words)}"
log_recognition.log(20, message)
def on_failure():
pass
def on_end():
pass
# Start the engine's main recognition loop
engine.prepare_for_recognition()
try:
print("Ready to listen...")
engine.do_recognition(on_begin, on_recognition, on_failure, on_end)
except KeyboardInterrupt:
pass
# Disconnect from the engine, freeing its resources.
engine.disconnect()
# Path: tacspeak/test_model.py
def test_model(tsv_file, model_dir, lexicon_file=None, num_threads=1):
# from tacspeak.test_model import test_model
# test_model("./testaudio/recorder.tsv", "./kaldi_model/")
# python -c 'from tacspeak.test_model import test_model; test_model("./testaudio/recorder.tsv", "./kaldi_model/")'
print("Start test_model")
calculator = Calculator()
lexicon = set()
if lexicon_file:
with open(lexicon_file, 'r', encoding='utf-8') as f:
for line in f:
word = line.strip().split(None, 1)[0]
lexicon.add(word)
print(f"opening {tsv_file}")
with open(tsv_file, 'r', encoding='utf-8') as f:
submissions = []
for line in f:
fields = line.rstrip('\n').split('\t')
text = fields[4]
wav_path = fields[0]
if not os.path.exists(wav_path):
print(f"{wav_path} does not exist")
continue
if lexicon_file and any(word not in lexicon for word in text.split()):
print(f"{wav_path} is out of vocabulary: {text}")
continue
submissions.append((wav_path, text,))
print(f"read lines: {len(submissions)}")
    # initialize first in case the model needs to be recompiled
engine = initialize_kaldi(model_dir)
engine.disconnect()
utterances_list = []
cmd_all_threads_overall_stats = []
with multiprocessing.Pool(processes=num_threads, initializer=initialize_kaldi, initargs=(model_dir,)) as pool:
try:
cmd_thread_overall_stats = {'cmd_not_correct_output':0,
'cmd_not_correct_rule':0,
'cmd_not_correct_options':0,
'cmd_not_recog_output':0,
'cmd_not_recog_input':0,
'cmds':0,
}
for output_str, text, output_options, input_options, correct_rule, wav_path in pool.starmap(recognize, submissions, chunksize=1):
result = calculator.calculate(text.strip().split(), output_str.strip().split())
n_errors = result['sub'] + result['del'] + result['ins']
n_correct = result['cor']
n_all = result['all']
rate_errors = float(n_errors) / float(max(1, n_all))
cmd_recog_input = 1 if input_options is not None else -1
cmd_recog_output = 1 if output_options is not None else -1
cmd_correct_rule = correct_rule
cmd_correct_options = 0
cmd_correct_output = 0
if cmd_recog_input == 1 and cmd_recog_output == 1:
if cmd_correct_rule == 1:
cmd_correct_options = 1
for key, value in input_options.items():
if output_options[key] != value:
cmd_correct_options = -1
if correct_rule == 1:
cmd_recog_input = 1
cmd_recog_output = 1
if cmd_correct_rule == -1 or cmd_correct_options == -1:
cmd_correct_output = -1
elif cmd_correct_rule == 1 and cmd_correct_options == 1:
cmd_correct_output = 1
cmd_thread_overall_stats['cmd_not_correct_output'] += 1 if cmd_correct_output == -1 else 0
cmd_thread_overall_stats['cmd_not_correct_rule'] += 1 if cmd_correct_rule == -1 else 0
cmd_thread_overall_stats['cmd_not_correct_options'] += 1 if cmd_correct_options == -1 else 0
cmd_thread_overall_stats['cmd_not_recog_output'] += 1 if cmd_recog_output == -1 else 0
cmd_thread_overall_stats['cmd_not_recog_input'] += 1 if cmd_recog_input == -1 else 0
cmd_thread_overall_stats['cmds'] += 1
entry = {'ref':text, 'hyp':output_str, 'wav_path':wav_path,
'cmd_correct_output':cmd_correct_output,
'cmd_correct_rule':cmd_correct_rule,
'cmd_correct_options':cmd_correct_options,
'cmd_recog_output':cmd_recog_output,
'cmd_recog_input':cmd_recog_input,
'output_options':output_options,
'input_options':input_options,
'n_errors':n_errors, 'n_correct':n_correct, 'n_all':n_all, 'rate_errors':rate_errors
}
utterances_list.append(entry)
cmd_all_threads_overall_stats.append(cmd_thread_overall_stats)
except KeyboardInterrupt as e:
print(f"Closing pool: {e}")
pool.close()
return None
utterances_list.sort(key=lambda x: (x['cmd_correct_output'] * 100.0) + (x['cmd_correct_rule'] * 3.0) + (x['cmd_correct_options'] * 3.0) + (x['cmd_recog_output'] * 2.0) + x['cmd_recog_input'] - x['rate_errors'], reverse=False)
cmd_overall_stats = {}
cmd_overall_stats['cmd_not_correct_output'] = 0
cmd_overall_stats['cmd_not_correct_rule'] = 0
cmd_overall_stats['cmd_not_correct_options'] = 0
cmd_overall_stats['cmd_not_recog_output'] = 0
cmd_overall_stats['cmd_not_recog_input'] = 0
cmd_overall_stats['cmds'] = 0
for thread_item in cmd_all_threads_overall_stats:
cmd_overall_stats['cmd_not_correct_output'] += thread_item['cmd_not_correct_output']
cmd_overall_stats['cmd_not_correct_rule'] += thread_item['cmd_not_correct_rule']
cmd_overall_stats['cmd_not_correct_options'] += thread_item['cmd_not_correct_options']
cmd_overall_stats['cmd_not_recog_output'] += thread_item['cmd_not_recog_output']
cmd_overall_stats['cmd_not_recog_input'] += thread_item['cmd_not_recog_input']
cmd_overall_stats['cmds'] += thread_item['cmds']
with open('./test_model_output_utterances.txt', 'w', encoding='utf-8') as outfile:
outfile.write(f"{cmd_overall_stats}\n\n")
for item in utterances_list:
outfile.write( f"\n cmd_correct_output={item['cmd_correct_output']}, "
+ f"cmd_correct_rule={item['cmd_correct_rule']}, "
+ f"cmd_correct_options={item['cmd_correct_options']}, "
+ f"cmd_recog_output={item['cmd_recog_output']}, "
+ f"cmd_recog_input={item['cmd_recog_input']}"
+ f"\n errors={item['n_errors']}, n_correct={item['n_correct']}"
+ f", n_all={item['n_all']}, rate_errors={item['rate_errors']}"
+ f"\n ref: {item['ref']}"
+ f"\n hyp: {item['hyp']}"
+ f"\n wav_path: {item['wav_path']}"
+ f"\n input_options: {item['input_options']}"
+ f"\n output_options: {item['output_options']}"
+ "\n"
)
print(f"{calculator.overall_string()}")
print(f"Command stats -> {cmd_overall_stats}")
return calculator, cmd_overall_stats
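# Illustrative only (hypothetical file contents): each line of the .tsv read above is expected to
# be tab-separated with the wav path in column 0 and the reference transcript in column 4; the
# other columns are not used by this function, e.g.
# ./retain/audio_0001.wav <tab> ... <tab> ... <tab> ... <tab> hold the door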
# Path: tacspeak/test_model.py
def test_model_dictation(tsv_file, model_dir, lexicon_file=None, num_threads=1):
print("Start test_model_dictation")
call_recognizer = None
calculator = Calculator()
lexicon = set()
if lexicon_file:
with open(lexicon_file, 'r', encoding='utf-8') as f:
for line in f:
word = line.strip().split(None, 1)[0]
lexicon.add(word)
print(f"opening {tsv_file}")
with open(tsv_file, 'r', encoding='utf-8') as f:
submissions = []
for line in f:
fields = line.rstrip('\n').split('\t')
text = fields[4]
wav_path = fields[0]
if not os.path.exists(wav_path):
print(f"{wav_path} does not exist")
continue
if lexicon_file and any(word not in lexicon for word in text.split()):
print(f"{wav_path} is out of vocabulary: {text}")
continue
submissions.append((wav_path, text,))
print(f"read lines: {len(submissions)}")
    # initialize first in case the model needs to be recompiled
initialize_kaldi_dictation(model_dir)
utterances_list = []
with multiprocessing.Pool(processes=num_threads, initializer=initialize_kaldi_dictation, initargs=(model_dir,)) as pool:
try:
for output_str, text, wav_path in pool.starmap(recognize_dictation, submissions, chunksize=1):
result = calculator.calculate(text.strip().split(), output_str.strip().split())
n_errors = result['sub'] + result['del'] + result['ins']
n_correct = result['cor']
n_all = result['all']
rate_errors = float(n_errors) / float(max(1, n_all))
entry = {'ref':text, 'hyp':output_str, 'wav_path':wav_path,
'n_errors':n_errors, 'n_correct':n_correct, 'n_all':n_all, 'rate_errors':rate_errors
}
utterances_list.append(entry)
except KeyboardInterrupt as e:
print(f"Closing pool: {e}")
pool.close()
return None
utterances_list.sort(key=lambda x: x['n_errors'], reverse=True)
with open('./test_model_output_dictation.txt', 'w', encoding='utf-8') as outfile:
for item in utterances_list:
outfile.write( f"\n errors={item['n_errors']}, n_correct={item['n_correct']}"
+ f", n_all={item['n_all']}, rate_errors={item['rate_errors']}"
+ f"\n ref: {item['ref']}"
+ f"\n hyp: {item['hyp']}"
+ f"\n wav_path: {item['wav_path']}"
+ "\n"
)
print(f"{calculator.overall_string()}")
return calculator, None
# Path: tacspeak/test_model.py
def transcribe_wav(wav_path, out_txt_path=None, model_dir=None):
call_recognizer = None
if model_dir is None:
model_dir = "./kaldi_model/"
initialize_kaldi(model_dir)
output_str, text, output_options, input_options, correct_rule, wav_path = recognize(wav_path, "")
entry = (model_dir, wav_path, output_str)
if out_txt_path is None:
return entry
if os.path.isfile(out_txt_path):
with open(out_txt_path, 'a') as f:
f.write(f"{entry}\n")
else:
with open(out_txt_path, 'w') as f:
f.write(f"{entry}\n")
return entry
# Path: tacspeak/test_model.py
def transcribe_wav_dictation(wav_path, out_txt_path=None, model_dir=None):
call_recognizer = None
if model_dir is None:
model_dir = "./kaldi_model/"
initialize_kaldi_dictation(model_dir)
output_str, _, wav_path = recognize_dictation(wav_path, "")
entry = (model_dir, wav_path, output_str)
if out_txt_path is None:
return entry
if os.path.isfile(out_txt_path):
with open(out_txt_path, 'a') as f:
f.write(f"{entry}\n")
else:
with open(out_txt_path, 'w') as f:
f.write(f"{entry}\n")
return entry
# Path: cli.py
import argparse
import os
import tacspeak
import logging
import kaldifst
import graphviz
from kaldi_active_grammar import Compiler, disable_donation_message
from tacspeak.__main__ import main as tacspeak_main
from tacspeak.test_model import test_model, test_model_dictation, transcribe_wav, transcribe_wav_dictation
from dragonfly import get_engine
from multiprocessing import freeze_support
#
# This file is part of Tacspeak.
# (c) Copyright 2023-2024 by Joshua Webb
# Licensed under the AGPL-3.0; see LICENSE.txt file.
#
def main():
print(f"Tacspeak version {tacspeak.__version__}")
print_notices()
disable_donation_message()
parser = argparse.ArgumentParser(description='Start speech recognition.')
parser.add_argument('--recompile_model', dest='model_dir', action='store',
metavar='model_dir', nargs='?', const='kaldi_model/',
help='recompile the model in `model_dir` (default is kaldi_model/), for changes to user_lexicon.txt')
parser.add_argument('--print_mic_list', action='store_true',
help=('see a list of available input devices and their corresponding indexes and names.'
+ ' useful for setting `audio_input_device` in ./tacspeak/user_settings.py'))
parser.add_argument('--test_model', dest='test_model', action='store',
metavar=('tsv_file', 'model_dir', 'lexicon_file', 'num_threads'), nargs=4,
help=('test model + active grammar recognition using test audio specified in .tsv file.'
+ " Example: --test_model './retain/retain.tsv' './kaldi_model/' './kaldi_model/lexicon.txt' 4"))
parser.add_argument('--test_dictation', action='store_true',
help=('only used together with --test_model. tests model using raw dictation graph, irrespective of grammar modules.'
+ " Example: --test_model './retain/retain.tsv' './kaldi_model/' './kaldi_model/lexicon.txt' 4 --test_dictation"))
parser.add_argument('--transcribe_wav', dest='transcribe_wav', action='store',
metavar=('wav_path', 'out_txt_path', 'model_dir'), nargs=3,
help=('transcribe a wav file using active grammar modules, output to txt file.'
+ " Example: --transcribe_wav 'audio.wav' 'audio.txt' './kaldi_model/'"))
parser.add_argument('--transcribe_dictation', action='store_true',
help=('only used together with --transcribe_wav. transcribes using raw dictation graph, irrespective of grammar modules.'
+ " Example: --transcribe_wav 'audio.wav' 'audio.txt' './kaldi_model/' --transcribe_dictation"))
parser.add_argument('--visualise_fst', dest='fst_filepath', action='store',
metavar=('fst_filepath', 'model_words_txt_filepath'), nargs=2,
help='generate .gv (dot) and .svg for visualisation of a FST file. Only use with small (~200 kB) files! Requires GraphViz installed.'
+ " Example: --visualise_fst './kaldi_model/cache.tmp/somefile.fst' './kaldi_model/words.txt'")
args = parser.parse_args()
if args.model_dir is not None and os.path.isdir(args.model_dir):
_log = logging.getLogger('kaldi')
logging.basicConfig(level=5)
compiler = Compiler(args.model_dir)
print("Compiling dictation graph (approx. 30 minutes)...")
compiler.compile_agf_dictation_fst()
return
if args.print_mic_list:
get_engine('kaldi').print_mic_list()
input("Press enter key to exit.")
return
if args.test_model:
if args.test_model[0] is not None and os.path.isfile(args.test_model[0]) and args.test_model[1] is not None and os.path.isdir(args.test_model[1]):
tsv_file = args.test_model[0]
model_dir = args.test_model[1]
try:
lexicon_file = args.test_model[2]
if not os.path.isfile(lexicon_file):
lexicon_file = None
except Exception as e:
print(f"{e}")
lexicon_file = None
try:
num_threads = int(args.test_model[3])
if not isinstance(num_threads, int) or num_threads < 1:
num_threads = 1
except Exception as e:
print(f"{e}")
num_threads = 1
print(f"{tsv_file},{model_dir},{lexicon_file},{num_threads}")
if args.test_dictation:
calculator, cmd_overall_stats = test_model_dictation(tsv_file, model_dir, lexicon_file, num_threads)
outfile_path = 'test_model_output_dictation_tokens.txt'
else:
calculator, cmd_overall_stats = test_model(tsv_file, model_dir, lexicon_file, num_threads)
outfile_path = 'test_model_output_tokens.txt'
with open(outfile_path, 'w', encoding='utf-8') as outfile:
outfile.write(f"\n{calculator.overall_string()}\n")
for item in calculator.data.items():
outfile.write(f"\n{str(item)}")
outfile.write("\n")
for entry in calculator.ranked_worst_to_best_list():
outfile.write(f"\n{str(entry)}")
overall_entry_1 = (model_dir, tsv_file, "Dictation" if args.test_dictation else "Command", "WER", calculator.overall_string())
overall_entry_2 = (model_dir, tsv_file, "Dictation" if args.test_dictation else "Command", "CMDERR", cmd_overall_stats)
with open('test_model_output_overall.txt', 'a', encoding='utf-8') as outfile:
outfile.write(f"{overall_entry_1}\n")
if not args.test_dictation:
outfile.write(f"{overall_entry_2}\n")
return calculator.overall_string(), cmd_overall_stats
return
if args.transcribe_wav:
if args.transcribe_wav[0] is not None and os.path.isfile(args.transcribe_wav[0]):
wav_path = args.transcribe_wav[0]
| try: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: KylinYee/R2-Talker-code
# Path: nerf/utils.py
def get_audio_features(features, att_mode, index, smooth_win_size=8):
if att_mode == 0:
return features[[index]]
elif att_mode == 1:
left = index - smooth_win_size
pad_left = 0
if left < 0:
pad_left = -left
left = 0
auds = features[left:index]
if pad_left > 0:
# pad may be longer than auds, so do not use zeros_like
auds = torch.cat([torch.zeros(pad_left, *auds.shape[1:], device=auds.device, dtype=auds.dtype), auds], dim=0)
return auds
elif att_mode == 2:
left = index - smooth_win_size//2
right = index + (smooth_win_size-smooth_win_size//2)
pad_left = 0
pad_right = 0
if left < 0:
pad_left = -left
left = 0
if right > features.shape[0]:
pad_right = right - features.shape[0]
right = features.shape[0]
auds = features[left:right]
if pad_left > 0:
auds = torch.cat([torch.zeros_like(auds[:pad_left]), auds], dim=0)
if pad_right > 0:
auds = torch.cat([auds, torch.zeros_like(auds[:pad_right])], dim=0) # [8, 16]
return auds
else:
raise NotImplementedError(f'wrong att_mode: {att_mode}')
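# Illustrative only (the [N, 44] feature size is hypothetical): with att_mode=2 the window is
# centred on `index` and zero-padded at the sequence boundaries, so the result always has
# smooth_win_size rows, e.g.
# auds = get_audio_features(features, att_mode=2, index=0)   # [8, 44]: 4 zero rows + features[0:4]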
# Path: nerf/utils.py
@torch.cuda.amp.autocast(enabled=False)
def get_rays(poses, intrinsics, H, W, N=-1, patch_size=1, rect=None):
''' get rays
Args:
poses: [B, 4, 4], cam2world
intrinsics: [4]
H, W, N: int
Returns:
rays_o, rays_d: [B, N, 3]
inds: [B, N]
'''
device = poses.device
B = poses.shape[0]
fx, fy, cx, cy = intrinsics
if rect is not None:
xmin, xmax, ymin, ymax = rect
N = (xmax - xmin) * (ymax - ymin)
i, j = custom_meshgrid(torch.linspace(0, W-1, W, device=device), torch.linspace(0, H-1, H, device=device)) # float
i = i.t().reshape([1, H*W]).expand([B, H*W]) + 0.5
j = j.t().reshape([1, H*W]).expand([B, H*W]) + 0.5
results = {}
if N > 0:
N = min(N, H*W)
if patch_size > 1:
# random sample left-top cores.
# NOTE: this impl will lead to less sampling on the image corner pixels... but I don't have other ideas.
num_patch = N // (patch_size ** 2)
inds_x = torch.randint(0, H - patch_size, size=[num_patch], device=device)
inds_y = torch.randint(0, W - patch_size, size=[num_patch], device=device)
inds = torch.stack([inds_x, inds_y], dim=-1) # [np, 2]
# create meshgrid for each patch
pi, pj = custom_meshgrid(torch.arange(patch_size, device=device), torch.arange(patch_size, device=device))
offsets = torch.stack([pi.reshape(-1), pj.reshape(-1)], dim=-1) # [p^2, 2]
inds = inds.unsqueeze(1) + offsets.unsqueeze(0) # [np, p^2, 2]
inds = inds.view(-1, 2) # [N, 2]
inds = inds[:, 0] * W + inds[:, 1] # [N], flatten
inds = inds.expand([B, N])
# only get rays in the specified rect
elif rect is not None:
# assert B == 1
mask = torch.zeros(H, W, dtype=torch.bool, device=device)
xmin, xmax, ymin, ymax = rect
mask[xmin:xmax, ymin:ymax] = 1
inds = torch.where(mask.view(-1))[0] # [nzn]
inds = inds.unsqueeze(0) # [1, N]
else:
inds = torch.randint(0, H*W, size=[N], device=device) # may duplicate
inds = inds.expand([B, N])
i = torch.gather(i, -1, inds)
j = torch.gather(j, -1, inds)
else:
inds = torch.arange(H*W, device=device).expand([B, H*W])
results['i'] = i
results['j'] = j
results['inds'] = inds
zs = torch.ones_like(i)
xs = (i - cx) / fx * zs
ys = (j - cy) / fy * zs
directions = torch.stack((xs, ys, zs), dim=-1)
directions = directions / torch.norm(directions, dim=-1, keepdim=True)
rays_d = directions @ poses[:, :3, :3].transpose(-1, -2) # (B, N, 3)
rays_o = poses[..., :3, 3] # [B, 3]
rays_o = rays_o[..., None, :].expand_as(rays_d) # [B, N, 3]
results['rays_o'] = rays_o
results['rays_d'] = rays_d
return results
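# Illustrative only (hypothetical `poses` / `intrinsics` tensors): sampling 2048 random rays per
# 512x512 image from a batch of cam2world poses, as described in the docstring above.
# rays = get_rays(poses, intrinsics, H=512, W=512, N=2048)
# rays['rays_o'].shape == rays['rays_d'].shape == (B, 2048, 3)   # plus 'i', 'j', 'inds' indices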
# Path: nerf/utils.py
@torch.cuda.amp.autocast(enabled=False)
def get_bg_coords(H, W, device):
X = torch.arange(H, device=device) / (H - 1) * 2 - 1 # in [-1, 1]
Y = torch.arange(W, device=device) / (W - 1) * 2 - 1 # in [-1, 1]
xs, ys = custom_meshgrid(X, Y)
bg_coords = torch.cat([xs.reshape(-1, 1), ys.reshape(-1, 1)], dim=-1).unsqueeze(0) # [1, H*W, 2], in [-1, 1]
return bg_coords
# Path: nerf/utils.py
@torch.cuda.amp.autocast(enabled=False)
def convert_poses(poses):
# poses: [B, 4, 4]
    # return [B, 6]: 3 Euler angles + 3 translation components
out = torch.empty(poses.shape[0], 6, dtype=torch.float32, device=poses.device)
out[:, :3] = matrix_to_euler_angles(poses[:, :3, :3])
out[:, 3:] = poses[:, :3, 3]
return out
# Path: nerf/provider.py
import os
import cv2
import glob
import json
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import trimesh
import torch
import torch.nn.functional as F
from scipy.spatial.transform import Slerp, Rotation
from torch.utils.data import DataLoader
from .utils import get_audio_features, get_rays, get_bg_coords, convert_poses
# support both [N, 16] labels and [N, 16, K] logits
if len(aud_features.shape) == 3:
# if self.opt.cond_type in ['eo', 'ds']:
# aud_features = aud_features.float().permute(0, 2, 1) # [N, 16, 29] --> [N, 29, 16]
if self.opt.emb:
print(f'[INFO] argmax to aud features {aud_features.shape} for --emb mode')
aud_features = aud_features.argmax(1) # [N, 16]
else:
assert self.opt.emb, "aud only provide labels, must use --emb"
aud_features = aud_features.long()
print(f'[INFO] load {self.opt.aud} aud_features: {aud_features.shape}')
self.torso_img = []
self.images = []
self.poses = []
self.exps = []
self.auds = []
self.face_rect = []
self.lips_rect = []
self.eye_area = []
for f in tqdm.tqdm(frames, desc=f'Loading {type} data'):
f_path = os.path.join(self.root_path, 'gt_imgs', str(f['img_id']) + '.jpg')
if not os.path.exists(f_path):
print('[WARN]', f_path, 'NOT FOUND!')
continue
pose = np.array(f['transform_matrix'], dtype=np.float32) # [4, 4]
pose = nerf_matrix_to_ngp(pose, scale=self.scale, offset=self.offset)
self.poses.append(pose)
if self.preload > 0:
                image = cv2.imread(f_path, cv2.IMREAD_UNCHANGED) # [H, W, 3] or [H, W, 4]
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image.astype(np.float32) / 255 # [H, W, 3/4]
self.images.append(image)
else:
self.images.append(f_path)
# load frame-wise bg
torso_img_path = os.path.join(self.root_path, 'torso_imgs', str(f['img_id']) + '.png')
if self.preload > 0:
torso_img = cv2.imread(torso_img_path, cv2.IMREAD_UNCHANGED) # [H, W, 4]
torso_img = cv2.cvtColor(torso_img, cv2.COLOR_BGRA2RGBA)
torso_img = torso_img.astype(np.float32) / 255 # [H, W, 3/4]
self.torso_img.append(torso_img)
else:
self.torso_img.append(torso_img_path)
# find the corresponding audio to the image frame
if not self.opt.asr and self.opt.aud == '':
aud = aud_features[min(f['aud_id'], aud_features.shape[0] - 1)] # careful for the last frame...
self.auds.append(aud)
# load lms and extract face
lms = np.loadtxt(os.path.join(self.root_path, 'ori_imgs', str(f['img_id']) + '.lms')) # [68, 2]
xmin, xmax = int(lms[31:36, 1].min()), int(lms[:, 1].max())
ymin, ymax = int(lms[:, 0].min()), int(lms[:, 0].max())
self.face_rect.append([xmin, xmax, ymin, ymax])
if self.opt.exp_eye:
eyes_left = slice(36, 42)
eyes_right = slice(42, 48)
area_left = polygon_area(lms[eyes_left, 0], lms[eyes_left, 1])
area_right = polygon_area(lms[eyes_right, 0], lms[eyes_right, 1])
# area percentage of two eyes of the whole image...
area = (area_left + area_right) / (self.H * self.W) * 100
self.eye_area.append(area)
if self.opt.finetune_lips:
lips = slice(48, 60)
xmin, xmax = int(lms[lips, 1].min()), int(lms[lips, 1].max())
ymin, ymax = int(lms[lips, 0].min()), int(lms[lips, 0].max())
# padding to H == W
cx = (xmin + xmax) // 2
cy = (ymin + ymax) // 2
l = max(xmax - xmin, ymax - ymin) // 2
xmin = max(0, cx - l)
xmax = min(self.H, cx + l)
ymin = max(0, cy - l)
ymax = min(self.W, cy + l)
self.lips_rect.append([xmin, xmax, ymin, ymax])
# load pre-extracted background image (should be the same size as training image...)
if self.opt.bg_img == 'white': # special
bg_img = np.ones((self.H, self.W, 3), dtype=np.float32)
elif self.opt.bg_img == 'black': # special
bg_img = np.zeros((self.H, self.W, 3), dtype=np.float32)
else: # load from file
# default bg
if self.opt.bg_img == '':
self.opt.bg_img = os.path.join(self.root_path, 'bc.jpg')
bg_img = cv2.imread(self.opt.bg_img, cv2.IMREAD_UNCHANGED) # [H, W, 3]
if bg_img.shape[0] != self.H or bg_img.shape[1] != self.W:
bg_img = cv2.resize(bg_img, (self.W, self.H), interpolation=cv2.INTER_AREA)
bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2RGB)
bg_img = bg_img.astype(np.float32) / 255 # [H, W, 3/4]
self.bg_img = bg_img
| self.poses = np.stack(self.poses, axis=0) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: camenduru/magicanimate-hf
# Path: magicanimate/models/unet_3d_blocks.py
class CrossAttnDownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
downsample_padding=1,
add_downsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample3D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
output_states = ()
for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn, return_dict=False),
hidden_states,
encoder_hidden_states,
)[0]
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
# Path: magicanimate/models/unet_3d_blocks.py
class CrossAttnUpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
add_upsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
res_hidden_states_tuple,
temb=None,
encoder_hidden_states=None,
upsample_size=None,
attention_mask=None,
):
for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn, return_dict=False),
hidden_states,
encoder_hidden_states,
)[0]
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
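# A minimal, self-contained sketch (torch only) of the skip-connection pattern used by the
# up block above: down blocks accumulate one feature map per resnet (plus the downsampler
# output) into a tuple, and up blocks pop entries from the end and concatenate them along the
# channel dimension. All shapes below are made up for illustration.
import torch
skips = tuple(torch.randn(1, 320, 8, 32, 32) for _ in range(3))  # pretend down-block outputs (B, C, F, H, W)
hidden = torch.randn(1, 320, 8, 32, 32)
for _ in range(3):
    res = skips[-1]                 # pop the most recent skip feature
    skips = skips[:-1]
    hidden = torch.cat([hidden, res], dim=1)   # channel concat, as in CrossAttnUpBlock3D.forward
    hidden = hidden[:, :320]        # stand-in for the resnet that maps channels back down
print(hidden.shape)                 # torch.Size([1, 320, 8, 32, 32])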
# Path: magicanimate/models/unet_3d_blocks.py
class DownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_downsample=True,
downsample_padding=1,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
motion_modules = []
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample3D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, temb=None, encoder_hidden_states=None):
output_states = ()
for resnet, motion_module in zip(self.resnets, self.motion_modules):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
# Path: magicanimate/models/unet_3d_blocks.py
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
motion_modules = []
for _ in range(num_layers):
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
in_channels // attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=in_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
hidden_states = resnet(hidden_states, temb)
return hidden_states
# Path: magicanimate/models/unet_3d_blocks.py
class UpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
prev_output_channel: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_upsample=True,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
motion_modules = []
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):
for resnet, motion_module in zip(self.resnets, self.motion_modules):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
# Path: magicanimate/models/unet_3d_blocks.py
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{down_block_type} does not exist.")
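# A hedged sketch of how the factory above might be invoked when assembling a UNet. The import
# path follows the "# Path:" marker, and the argument values are assumptions for illustration;
# whether this constructs cleanly depends on the rest of the repository, which is not shown here.
from magicanimate.models.unet_3d_blocks import get_down_block
block = get_down_block(
    down_block_type="CrossAttnDownBlock3D",
    num_layers=2,
    in_channels=320,
    out_channels=320,
    temb_channels=1280,
    add_downsample=True,
    resnet_eps=1e-5,
    resnet_act_fn="swish",
    attn_num_head_channels=8,
    resnet_groups=32,
    cross_attention_dim=768,       # required for the cross-attention block type
    downsample_padding=1,
    use_motion_module=False,       # motion modules disabled, so no motion_module_type/kwargs needed
)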
# Path: magicanimate/models/unet_3d_blocks.py
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{up_block_type} does not exist.")
# Path: magicanimate/models/resnet.py
class InflatedConv3d(nn.Conv2d):
def forward(self, x):
video_length = x.shape[2]
x = rearrange(x, "b c f h w -> (b f) c h w")
x = super().forward(x)
x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
return x
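# A quick shape check for InflatedConv3d: it folds the frame axis into the batch axis, applies a
# plain 2D convolution, and unfolds again, so every frame is convolved independently. The import
# path mirrors the "# Path:" marker above and assumes the repository is importable.
import torch
from magicanimate.models.resnet import InflatedConv3d
conv = InflatedConv3d(4, 8, kernel_size=3, padding=1)
video = torch.randn(2, 4, 16, 64, 64)      # (batch, channels, frames, height, width)
out = conv(video)
print(out.shape)                            # torch.Size([2, 8, 16, 64, 64])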
# Path: magicanimate/models/unet.py
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
from diffusers.utils import WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint
# *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Adapted from https://github.com/guoyww/AnimateDiff
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
====REPOSITORY====
# Repo Name: TISUnion/PrimeBackup
# Path: prime_backup/action/list_backup_action.py
class ListBackupAction(_ListBackupActionBase[List[BackupInfo]]):
def run(self) -> List[BackupInfo]:
with DbAccess.open_session() as session:
backups = session.list_backup(backup_filter=self.backup_filter, limit=self.limit, offset=self.offset)
return [BackupInfo.of(backup) for backup in backups]
# Path: prime_backup/mcdr/mcdr_globals.py
def __init():
def load():
# Path: prime_backup/mcdr/task/basic_task.py
class LightTask(_BasicTask[_T], ABC):
"""
For tasks that require DB access and run fast
"""
MAX_ONGOING_TASK = 5
# Path: prime_backup/mcdr/task/general/help_message_utils.py
class HelpMessageLine(NamedTuple):
def is_help(self) -> bool:
def parse_help_message(msg: RTextBase) -> List[HelpMessageLine]:
# Path: prime_backup/mcdr/task/general/show_help_task.py
class ShowHelpTask(ImmediateTask[None]):
COMMANDS_WITH_DETAILED_HELP = [
'back',
'crontab',
'database',
'export',
'import',
'list',
'tag',
]
def __init__(self, source: CommandSource, what: Optional[str] = None):
super().__init__(source)
self.what = what
@property
def id(self) -> str:
return 'help'
def reply(self, msg: Union[str, RTextBase], *, with_prefix: bool = False):
super().reply(msg, with_prefix=with_prefix)
@property
def __cmd_prefix(self) -> str:
return self.config.command.prefix
def __reply_help(self, msg: RTextBase, hide_for_permission: bool = False):
for h in help_message_utils.parse_help_message(msg):
if hide_for_permission and h.is_help() and not self.source.has_permission(h.permission):
continue
self.reply(h.text)
def __has_permission(self, literal: str) -> bool:
return self.source.has_permission(self.config.command.permission.get(literal))
def run(self) -> None:
def get_standalone_formats() -> str:
from prime_backup.types.standalone_backup_format import StandaloneBackupFormat
return ', '.join([f'§3{ebf.name}§r' for ebf in StandaloneBackupFormat])
with self.source.preferred_language_context():
if self.what is None:
self.reply(self.tr('commands.title').set_color(TextColors.help_title))
self.__reply_help(self.tr('commands.content', prefix=self.__cmd_prefix), True)
self.reply(self.tr('arguments.title').set_color(TextColors.help_title))
self.__reply_help(self.tr('arguments.content'))
self.reply(self.tr('other.title').set_color(TextColors.help_title))
self.reply_tr(
'other.nodes_with_help',
RTextBase.join(
RText(', ', RColor.dark_gray),
[
RText(cmd, RColor.gray).
h(TextComponents.command(f'help {cmd}')).
c(RAction.suggest_command, mkcmd(f'help {cmd}'))
for cmd in self.COMMANDS_WITH_DETAILED_HELP
if self.__has_permission(cmd)
]
)
)
self.reply_tr(
'other.docs',
TextComponents.url(constants.DOCUMENTATION_URL, click=True).
h(self.tr('other.docs.hover'))
)
elif self.what in self.COMMANDS_WITH_DETAILED_HELP:
if not self.__has_permission(self.what):
self.reply_tr('permission_denied', RText(self.what, RColor.gray))
return
kwargs = {'prefix': self.__cmd_prefix}
if self.what == 'crontab':
kwargs['job_ids'] = ', '.join([f'{TextColors.job_id.mc_code}{jid.name}§r' for jid in CrontabJobId])
elif self.what == 'database':
name = mcdr_globals.metadata.name
kwargs['name'] = name
kwargs['compress_methods'] = ', '.join([f'§d{cm.name}§r' for cm in CompressMethod])
kwargs['hash_methods'] = ', '.join([f'§d{hm.name}§r' for hm in HashMethod])
if self.config.database.compact.enabled:
kwargs['scheduled_compact_notes'] = self.tr(
f'node_help.{self.what}.scheduled_compact.on',
name=name,
cmd=f"§7{mkcmd(f'crontab {CrontabJobId.vacuum_sqlite.name}')}§r",
)
else:
kwargs['scheduled_compact_notes'] = self.tr(f'node_help.{self.what}.scheduled_compact.off')
elif self.what == 'export':
kwargs['export_formats'] = get_standalone_formats()
kwargs['backup_meta_file_name'] = f'§3{constants.BACKUP_META_FILE_NAME}§r'
elif self.what == 'import':
kwargs['backup_formats'] = get_standalone_formats()
self.__reply_help(self.tr(f'node_help.{self.what}', **kwargs))
else:
raise ValueError(self.what)
# Path: prime_backup/mcdr/text_components.py
class TextComponents:
@classmethod
def tr(cls, key, *args, **kwargs):
from prime_backup.utils.mcdr_utils import tr
return tr('text_components.' + key, *args, **kwargs)
@classmethod
def auto(cls, value: Any) -> RTextBase:
if isinstance(value, bool):
return cls.boolean(value)
elif isinstance(value, (int, float)):
return cls.number(value)
elif isinstance(value, Duration):
return cls.duration(value)
elif isinstance(value, Operator):
return cls.operator(value)
elif isinstance(value, ByteCount):
return cls.file_size(value)
elif isinstance(value, Path):
return cls.file_name(value)
elif isinstance(value, datetime.datetime):
return cls.date(value)
else:
return RTextBase.from_any(value)
@classmethod
def backup_brief(cls, backup: BackupInfo, *, backup_id_fancy: bool = True) -> RTextBase:
# "backup #1: foobar"
return RTextList(cls.tr(
'backup_brief',
cls.backup_id(backup.id, hover=backup_id_fancy, click=backup_id_fancy),
cls.backup_comment(backup.comment),
))
@classmethod
def backup_comment(cls, comment: str) -> RTextBase:
if len(comment) > 0:
if (er := backup_utils.extract_backup_comment_translation_key(comment)) is not None:
args = er.args
if er.key == 'pre_restore' and len(args) == 0:
args = ('?',)
return cls.tr(f'backup_comment.{er.key}', *args)
return RText(comment)
else:
return cls.tr('backup_comment.none').set_color(RColor.gray).set_styles(RStyle.italic)
@classmethod
def backup_date(cls, backup: BackupInfo):
return cls.date(backup.date)
@classmethod
def backup_full(cls, backup: BackupInfo, operation_buttons: bool = False, *, show_flags: bool = False) -> RTextBase:
# "[#1] [>] [x] H-- 1.2GiB 2023-11-30 09:30:13: foobar"
t_bid = cls.backup_id(backup.id)
rtl = RTextList(RText('[', RColor.gray), t_bid, RText('] ', RColor.gray))
if operation_buttons:
rtl.append(RText('[>]', color=RColor.dark_green).h(cls.tr('backup_full.restore', t_bid)).c(RAction.suggest_command, mkcmd(f'back {backup.id}')), ' ')
if not backup.tags.is_protected():
rtl.append(RText('[x]', color=RColor.red).h(cls.tr('backup_full.delete', t_bid)).c(RAction.suggest_command, mkcmd(f'delete {backup.id}')), ' ')
else:
rtl.append(RText('[x]', color=RColor.dark_gray).h(cls.tr('backup_full.protected', t_bid)), ' ')
if show_flags:
for name in [BackupTagName.hidden, BackupTagName.pre_restore_backup, BackupTagName.protected]:
misc_utils.assert_true(name.value.type is bool, 'it should be a bool field')
flag = backup.tags.get(name) is True
if flag:
rtl.append(name.value.flag)
else:
rtl.append(RText('-', RColor.dark_gray))
rtl.append(' ')
rtl.append(
cls.backup_size(backup), ' ',
cls.backup_date(backup), RText(': ', RColor.gray),
cls.backup_comment(backup.comment).h(cls.tr('backup_full.creator', cls.operator(backup.creator))),
)
return rtl
@classmethod
def backup_id(cls, backup_id: Union[int, BackupInfo], *, hover: bool = True, click: bool = True) -> RTextBase:
if isinstance(backup_id, BackupInfo):
backup_id = backup_id.id
text = RText(f'#{backup_id}', TextColors.backup_id)
if hover:
text.h(cls.tr('backup_id.hover', RText(backup_id, TextColors.backup_id)))
if click:
text.c(RAction.run_command, mkcmd(f'show {backup_id}'))
return text
@classmethod
def backup_id_list(cls, backup_ids: Iterable[Any], **kwargs) -> RTextBase:
return RTextList(
'[',
RTextBase.join(', ', [cls.backup_id(backup_id, **kwargs) for backup_id in backup_ids]),
']',
)
@classmethod
def backup_size(cls, backup_or_blob_list_summary: Union[BackupInfo, BlobListSummary], *, ndigits: int = 2) -> RTextBase:
b = backup_or_blob_list_summary
return cls.file_size(b.raw_size, ndigits=ndigits).h(cls.dual_size_hover(b.raw_size, b.stored_size))
@classmethod
def blob_list_summary_store_size(cls, bls: BlobListSummary) -> RTextBase:
return cls.file_size(bls.raw_size).h(cls.dual_size_hover(bls.raw_size, bls.stored_size))
@classmethod
def boolean(cls, value: bool) -> RTextBase:
return RText(str(value).lower(), RColor.green if value else RColor.red)
@classmethod
def command(cls, s: str, *, color: RColor = RColor.gray, suggest: bool = False, run: bool = False, raw: bool = False) -> RTextBase:
cmd = s if raw else mkcmd(s)
text = RText(cmd, color)
if suggest:
text.h(cls.tr('command.suggest', cmd)).c(RAction.suggest_command, cmd)
elif run:
text.h(cls.tr('command.run', cmd)).c(RAction.run_command, cmd)
return text
@classmethod
def compress_method(cls, compress_method: Union[str, CompressMethod]) -> RTextBase:
if isinstance(compress_method, CompressMethod):
compress_method = compress_method.name
return RText(compress_method, RColor.light_purple)
@classmethod
def confirm_hint(cls, what: RTextBase, time_wait_text: Any):
return cls.tr(
'confirm_hint.base',
time_wait_text,
click_and_run(
RTextList(cls.tr('confirm_hint.confirm', what), '√').set_color(RColor.yellow),
cls.tr('confirm_hint.confirm.hover', cls.command('confirm'), what),
mkcmd('confirm'),
),
click_and_run(
RTextList(cls.tr('confirm_hint.abort', what), '×').set_color(RColor.gold),
cls.tr('confirm_hint.abort.hover', cls.command('abort'), what),
mkcmd('abort'),
),
)
@classmethod
def crontab(cls, crontab_str: str) -> RTextBase:
url = 'https://crontab.guru/#' + crontab_str.replace(' ', '_')
return RText(crontab_str, TextColors.date).h(cls.tr('crontab.help_url', cls.url(url, click=False))).c(RAction.open_url, url)
@classmethod
def date_diff(cls, date: datetime.datetime) -> RTextBase:
now = datetime.datetime.now(date.tzinfo)
diff = (date - now).total_seconds()
if diff >= 0:
return cls.tr('date_diff.later', cls.duration(diff))
else:
return cls.tr('date_diff.ago', cls.duration(-diff))
@classmethod
def date(cls, date: Union[datetime.datetime, int]) -> RTextBase:
if isinstance(date, int):
date = conversion_utils.timestamp_to_local_date(date)
return RText(conversion_utils.datetime_to_str(date), TextColors.date).h(cls.date_diff(date))
@classmethod
def dual_size_hover(cls, raw_size: int, stored_size: int, *, ndigits: int = 2) -> RTextBase:
t_raw_size = cls.file_size(raw_size, ndigits=ndigits)
t_stored_size = cls.file_size(stored_size, ndigits=ndigits)
t_percent = cls.percent(stored_size, raw_size)
return cls.tr('dual_size_hover', t_stored_size, t_percent, t_raw_size)
@classmethod
def duration(cls, seconds_or_duration: Union[float, Duration], *, color: Optional[RColor] = TextColors.number, ndigits: int = 2) -> RTextBase:
# full duration text, e.g. "1 minute", "2 hours"
if isinstance(seconds_or_duration, Duration):
duration = seconds_or_duration
elif isinstance(seconds_or_duration, (int, float)):
duration = Duration(seconds_or_duration)
else:
raise TypeError(type(seconds_or_duration))
value, unit = duration.auto_format()
plural_suffix = cls.tr('duration.plural_suffix') if value != 1 else ''
text = cls.tr('duration.text', round(value, ndigits), cls.tr('duration.' + unit, plural_suffix))
if color is not None:
text.set_color(color)
return text
@classmethod
def file_mode(cls, mode: int) -> RTextBase:
if stat.S_ISREG(mode):
type_flag = '-'
color = RColor.light_purple
elif stat.S_ISDIR(mode):
type_flag = 'd'
color = RColor.blue
elif stat.S_ISLNK(mode):
type_flag = 'l'
color = RColor.aqua
else:
type_flag = '?'
color = RColor.gray
permissions = ''
for i in range(9):
permissions += 'rwx'[i % 3] if (mode >> (8 - i)) & 1 == 1 else '-'
return RText(type_flag + permissions, color)
@classmethod
def file_name(cls, file_path: Path) -> RTextBase:
return RText(file_path.name, TextColors.file).h(file_path.as_posix())
@classmethod
def file_size(cls, byte_cnt: Union[int, ByteCount], *, ndigits: int = 2, always_sign: bool = False, color: RColor = TextColors.byte_count) -> RTextBase:
if not isinstance(byte_cnt, ByteCount):
byte_cnt = ByteCount(byte_cnt)
return RText(byte_cnt.auto_str(ndigits=ndigits, always_sign=always_sign), color=color)
@classmethod
def hash_method(cls, hash_method: Union[str, HashMethod]) -> RTextBase:
if isinstance(hash_method, HashMethod):
hash_method = hash_method.name
return RText(hash_method, RColor.light_purple)
@classmethod
def number(cls, value: Any) -> RTextBase:
return RText(value, TextColors.number)
@classmethod
def number_list(cls, values: Iterable[Any]) -> RTextBase:
return RTextList(
'[',
RTextBase.join(', ', [cls.number(v) for v in values]),
']',
)
@classmethod
def operator(cls, op: Operator) -> RTextBase:
tr_key = f'operator.{op.type}'
if op.type in ['player', 'command_source', 'unknown']:
return cls.tr(tr_key, op.name)
elif op.type in ['console']:
return cls.tr(tr_key)
elif op.type == constants.PLUGIN_ID:
from prime_backup.mcdr import mcdr_globals
t_name = cls.tr(tr_key + '.' + op.name)
if not mcdr_globals.server.has_translation(misc_utils.ensure_type(getattr(t_name, 'translation_key'), str)):
t_name = RText(op.name, styles=RStyle.italic)
return RTextList(cls.tr(tr_key), RText('-', RColor.gray), t_name).set_color(RColor.dark_aqua)
else:
return RText(f'{op.type}:{op.name}')
@classmethod
def percent(cls, value: float, total: float) -> RTextBase:
if total != 0:
return RText(f'{100 * value / total:.1f}%', RColor.dark_green)
else:
return RText('N/A', RColor.gray)
@classmethod
def tag_name(cls, tag_name: BackupTagName) -> RTextBase:
return RText(tag_name.name, TextColors.backup_tag).h(tag_name.value.text)
@classmethod
def title(cls, text: Any) -> RTextBase:
return RTextList(RText('======== ', RColor.gray), text, RText(' ========', RColor.gray))
@classmethod
def url(cls, url: str, *, click: bool = True) -> RTextBase:
text = RText(url, RColor.blue, RStyle.underlined)
if click:
text.c(RAction.open_url, url)
return text
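# The permission string built by TextComponents.file_mode walks the nine low permission bits from
# most to least significant. A standalone re-implementation of just that logic (no MCDR text types
# needed), shown here only to make the bit arithmetic concrete:
import stat
def perm_string(mode: int) -> str:
    type_flag = '-' if stat.S_ISREG(mode) else 'd' if stat.S_ISDIR(mode) else 'l' if stat.S_ISLNK(mode) else '?'
    perms = ''.join('rwx'[i % 3] if (mode >> (8 - i)) & 1 else '-' for i in range(9))
    return type_flag + perms
print(perm_string(0o100644))  # "-rw-r--r--"  regular file
print(perm_string(0o040755))  # "drwxr-xr-x"  directory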
# Path: prime_backup/mcdr/text_components.py
class TextColors:
backup_id = RColor.gold
backup_tag = RColor.aqua
byte_count = RColor.green
date = RColor.aqua
file = RColor.dark_aqua
help_title = RColor.light_purple
job_id = RColor.green
number = RColor.yellow
# Path: prime_backup/types/backup_filter.py
class BackupFilter:
id_start: Optional[int] = None
id_end: Optional[int] = None
creator: Optional[Operator] = None
timestamp_start: Optional[int] = None
timestamp_end: Optional[int] = None
tag_filters: List[BackupTagFilter] = dataclasses.field(default_factory=list)
def filter_pre_restore_backup(self) -> 'BackupFilter':
self.tag_filters.append(BackupTagFilter(BackupTagName.pre_restore_backup, True, BackupTagFilter.Policy.equals))
return self
def filter_non_pre_restore_backup(self) -> 'BackupFilter':
self.tag_filters.append(BackupTagFilter(BackupTagName.pre_restore_backup, True, BackupTagFilter.Policy.not_equals))
return self
def filter_non_hidden_backup(self) -> 'BackupFilter':
self.tag_filters.append(BackupTagFilter(BackupTagName.hidden, True, BackupTagFilter.Policy.not_equals))
return self
def filter_non_protected_backup(self) -> 'BackupFilter':
self.tag_filters.append(BackupTagFilter(BackupTagName.protected, True, BackupTagFilter.Policy.not_equals))
return self
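# A small sketch of composing a BackupFilter the same way ShowWelcomeTask does further below;
# the import path follows the "# Path:" marker above and is an assumption.
from prime_backup.types.backup_filter import BackupFilter
flt = BackupFilter()
flt.filter_non_pre_restore_backup()   # exclude automatic pre-restore backups
flt.filter_non_hidden_backup()        # exclude backups tagged as hidden
# flt.tag_filters now holds one BackupTagFilter per call; the filter is then passed to
# ListBackupAction(backup_filter=flt, limit=...) to constrain the query.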
# Path: prime_backup/utils/mcdr_utils.py
def mkcmd(s: str) -> str:
from prime_backup.config.config import Config
cmd = Config.get().command.prefix
if len(s) > 0:
cmd += ' ' + s
return cmd
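# mkcmd simply prepends the configured command prefix. A standalone re-implementation of the same
# logic, with the prefix passed explicitly so it runs without an MCDR config (the "!!pb" value is
# an assumption for illustration):
def mkcmd_demo(prefix: str, s: str) -> str:
    return prefix if len(s) == 0 else f"{prefix} {s}"
print(mkcmd_demo("!!pb", ""))        # "!!pb"
print(mkcmd_demo("!!pb", "help"))    # "!!pb help"
print(mkcmd_demo("!!pb", "back 3"))  # "!!pb back 3"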
# Path: prime_backup/mcdr/task/general/show_welcome_task.py
from typing import Dict, Union
from mcdreforged.api.all import *
from prime_backup.action.list_backup_action import ListBackupAction
from prime_backup.mcdr import mcdr_globals
from prime_backup.mcdr.task.basic_task import LightTask
from prime_backup.mcdr.task.general import help_message_utils
from prime_backup.mcdr.task.general.show_help_task import ShowHelpTask
from prime_backup.mcdr.text_components import TextComponents, TextColors
from prime_backup.types.backup_filter import BackupFilter
from prime_backup.utils.mcdr_utils import mkcmd
class ShowWelcomeTask(LightTask[None]):
BACKUP_NUMBER_TO_SHOW = 3
COMMON_COMMANDS = ['', 'help', 'make', 'back', 'list', 'show', 'rename', 'delete', 'confirm', 'abort']
@property
def id(self) -> str:
return 'welcome'
def reply(self, msg: Union[str, RTextBase], *, with_prefix: bool = False):
super().reply(msg, with_prefix=with_prefix)
@property
def __cmd_prefix(self) -> str:
return self.config.command.prefix
def __generate_command_helps(self) -> Dict[str, RTextBase]:
msg = ShowHelpTask(self.source).tr('commands.content', prefix=self.__cmd_prefix)
with self.source.preferred_language_context():
return {h.literal: h.text for h in help_message_utils.parse_help_message(msg)}
def run(self) -> None:
self.reply(TextComponents.title(self.tr(
'title',
name=RText(mcdr_globals.metadata.name, RColor.dark_aqua),
version=RText(f'v{mcdr_globals.metadata.version}', RColor.gold),
)))
self.reply(mcdr_globals.metadata.get_description_rtext())
self.reply(
self.tr('common_commands').
set_color(TextColors.help_title).
h(self.tr('common_commands.hover', TextComponents.command('help'))).
c(RAction.suggest_command, mkcmd('help'))
)
helps = self.__generate_command_helps()
for cmd in self.COMMON_COMMANDS:
self.reply(helps[cmd])
backup_filter = BackupFilter()
backup_filter.filter_non_pre_restore_backup()
backups = ListBackupAction(backup_filter=backup_filter, limit=self.BACKUP_NUMBER_TO_SHOW).run()
self.reply(self.tr('recent_backups', len(backups)).set_color(TextColors.help_title))
for backup in backups:
self.reply(TextComponents.backup_full(backup, operation_buttons=True))
self.reply(self.tr('quick_actions.title').set_color(TextColors.help_title))
with self.source.preferred_language_context():
buttons = [
RTextList('[', self.tr('quick_actions.create'), ']').
set_color(RColor.green).
h(TextComponents.command('make')).
c(RAction.suggest_command, mkcmd('make ' + self.tr('quick_actions.create.comment').to_plain_text()))
]
if len(backups) > 0:
buttons.append(
RTextList('[', self.tr('quick_actions.restore', TextComponents.backup_brief(backups[0])), ']').
set_color(RColor.red).
h(TextComponents.command('back')).
c(RAction.suggest_command, mkcmd('back'))
)
self.reply(RTextBase.join(' ', buttons))
====REPOSITORY====
# Repo Name: halleewong/ScribblePrompt
# Path: segment_anything/modeling/sam.py
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder,
mask_decoder: MaskDecoder,
pixel_mean: List[float] = [123.675, 116.28, 103.53],
pixel_std: List[float] = [58.395, 57.12, 57.375],
) -> None:
"""
SAM predicts object masks from an image and input prompts.
Arguments:
image_encoder (ImageEncoderViT): The backbone used to encode the
image into image embeddings that allow for efficient mask prediction.
prompt_encoder (PromptEncoder): Encodes various types of input prompts.
mask_decoder (MaskDecoder): Predicts masks from the image embeddings
and encoded prompts.
pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
pixel_std (list(float)): Std values for normalizing pixels in the input image.
"""
super().__init__()
self.image_encoder = image_encoder
self.prompt_encoder = prompt_encoder
self.mask_decoder = mask_decoder
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
@property
def device(self) -> Any:
return self.pixel_mean.device
@torch.no_grad()
def forward(
self,
batched_input: List[Dict[str, Any]],
multimask_output: bool,
) -> List[Dict[str, torch.Tensor]]:
"""
Predicts masks end-to-end from provided images and prompts.
If prompts are not known in advance, using SamPredictor is
recommended over calling the model directly.
Arguments:
batched_input (list(dict)): A list over input images, each a
dictionary with the following keys. A prompt key can be
excluded if it is not present.
'image': The image as a torch tensor in 3xHxW format,
already transformed for input to the model.
'original_size': (tuple(int, int)) The original size of
the image before transformation, as (H, W).
'point_coords': (torch.Tensor) Batched point prompts for
this image, with shape BxNx2. Already transformed to the
input frame of the model.
'point_labels': (torch.Tensor) Batched labels for point prompts,
with shape BxN.
'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
Already transformed to the input frame of the model.
'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
in the form Bx1xHxW.
multimask_output (bool): Whether the model should predict multiple
disambiguating masks, or return a single mask.
Returns:
(list(dict)): A list over input images, where each element is
a dictionary with the following keys.
'masks': (torch.Tensor) Batched binary mask predictions,
with shape BxCxHxW, where B is the number of input prompts,
C is determined by multimask_output, and (H, W) is the
original size of the image.
'iou_predictions': (torch.Tensor) The model's predictions
of mask quality, in shape BxC.
'low_res_logits': (torch.Tensor) Low resolution logits with
shape BxCxHxW, where H=W=256. Can be passed as mask input
to subsequent iterations of prediction.
"""
input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
image_embeddings = self.image_encoder(input_images)
outputs = []
for image_record, curr_embedding in zip(batched_input, image_embeddings):
if "point_coords" in image_record:
points = (image_record["point_coords"], image_record["point_labels"])
else:
points = None
sparse_embeddings, dense_embeddings = self.prompt_encoder(
points=points,
boxes=image_record.get("boxes", None),
masks=image_record.get("mask_inputs", None),
)
low_res_masks, iou_predictions = self.mask_decoder(
image_embeddings=curr_embedding.unsqueeze(0),
image_pe=self.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
masks = self.postprocess_masks(
low_res_masks,
input_size=image_record["image"].shape[-2:],
original_size=image_record["original_size"],
)
masks = masks > self.mask_threshold
outputs.append(
{
"masks": masks,
"iou_predictions": iou_predictions,
"low_res_logits": low_res_masks,
}
)
return outputs
def postprocess_masks(
self,
masks: torch.Tensor,
input_size: Tuple[int, ...],
original_size: Tuple[int, ...],
) -> torch.Tensor:
"""
Remove padding and upscale masks to the original image size.
Arguments:
masks (torch.Tensor): Batched masks from the mask_decoder,
in BxCxHxW format.
input_size (tuple(int, int)): The size of the image input to the
model, in (H, W) format. Used to remove padding.
original_size (tuple(int, int)): The original size of the image
before resizing for input to the model, in (H, W) format.
Returns:
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
is given by original_size.
"""
masks = F.interpolate(
masks,
(self.image_encoder.img_size, self.image_encoder.img_size),
mode="bilinear",
align_corners=False,
)
masks = masks[..., : input_size[0], : input_size[1]]
masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
return masks
def preprocess(self, x: torch.Tensor) -> torch.Tensor:
"""Normalize pixel values and pad to a square input."""
# Normalize colors
x = (x - self.pixel_mean) / self.pixel_std
# Pad
h, w = x.shape[-2:]
padh = self.image_encoder.img_size - h
padw = self.image_encoder.img_size - w
x = F.pad(x, (0, padw, 0, padh))
return x
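# What Sam.preprocess does, as a standalone shape check: normalize with the per-channel mean/std
# and pad on the right/bottom up to a square of img_size. The 1024 target and the statistics below
# mirror the defaults shown above; the input tensor is made up for illustration.
import torch
import torch.nn.functional as F
x = torch.rand(3, 768, 1024) * 255                      # an already-resized RGB image, H, W <= 1024
mean = torch.tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
std = torch.tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
x = (x - mean) / std
h, w = x.shape[-2:]
x = F.pad(x, (0, 1024 - w, 0, 1024 - h))                # pad width then height, right/bottom only
print(x.shape)                                          # torch.Size([3, 1024, 1024])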
# Path: segment_anything/modeling/image_encoder.py
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(out_chans),
nn.Conv2d(
out_chans,
out_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(out_chans),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
if self.pos_embed is not None:
x = x + self.pos_embed
for blk in self.blocks:
x = blk(x)
x = self.neck(x.permute(0, 3, 1, 2))
return x
# Path: segment_anything/modeling/mask_decoder.py
class MaskDecoder(nn.Module):
def __init__(
self,
*,
transformer_dim: int,
transformer: nn.Module,
num_multimask_outputs: int = 3,
activation: Type[nn.Module] = nn.GELU,
iou_head_depth: int = 3,
iou_head_hidden_dim: int = 256,
) -> None:
"""
Predicts masks given an image and prompt embeddings, using a
transformer architecture.
Arguments:
transformer_dim (int): the channel dimension of the transformer
transformer (nn.Module): the transformer used to predict masks
num_multimask_outputs (int): the number of masks to predict
when disambiguating masks
activation (nn.Module): the type of activation to use when
upscaling masks
iou_head_depth (int): the depth of the MLP used to predict
mask quality
iou_head_hidden_dim (int): the hidden dimension of the MLP
used to predict mask quality
"""
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
self.num_multimask_outputs = num_multimask_outputs
self.iou_token = nn.Embedding(1, transformer_dim)
self.num_mask_tokens = num_multimask_outputs + 1
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
self.output_upscaling = nn.Sequential(
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
LayerNorm2d(transformer_dim // 4),
activation(),
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
activation(),
)
self.output_hypernetworks_mlps = nn.ModuleList(
[
MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
for i in range(self.num_mask_tokens)
]
)
self.iou_prediction_head = MLP(
transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
)
def forward(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Arguments:
image_embeddings (torch.Tensor): the embeddings from the image encoder
image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
multimask_output (bool): Whether to return multiple masks or a single
mask.
Returns:
torch.Tensor: batched predicted masks
torch.Tensor: batched predictions of mask quality
"""
masks, iou_pred = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
sparse_prompt_embeddings=sparse_prompt_embeddings,
dense_prompt_embeddings=dense_prompt_embeddings,
)
# Select the correct mask or masks for output
if multimask_output:
mask_slice = slice(1, None)
else:
mask_slice = slice(0, 1)
masks = masks[:, mask_slice, :, :]
iou_pred = iou_pred[:, mask_slice]
# Prepare output
return masks, iou_pred
def predict_masks(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Predicts masks. See 'forward' for more details."""
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
# Expand per-image data in batch direction to be per-mask
#src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
src = image_embeddings # per https://github.com/facebookresearch/segment-anything/issues/365 to fix error with batch size > 1
src = src + dense_prompt_embeddings
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
b, c, h, w = src.shape
# Run the transformer
hs, src = self.transformer(src, pos_src, tokens)
iou_token_out = hs[:, 0, :]
mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
src = src.transpose(1, 2).view(b, c, h, w)
upscaled_embedding = self.output_upscaling(src)
hyper_in_list: List[torch.Tensor] = []
for i in range(self.num_mask_tokens):
hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
hyper_in = torch.stack(hyper_in_list, dim=1)
b, c, h, w = upscaled_embedding.shape
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
return masks, iou_pred
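# With num_multimask_outputs=3 the decoder always predicts num_mask_tokens = 4 masks internally;
# the slicing in forward() keeps either the single "default" mask or the three alternatives.
# A tensor-only illustration of that selection:
import torch
masks = torch.randn(2, 4, 256, 256)            # (B, num_mask_tokens, H, W)
print(masks[:, slice(1, None)].shape)          # torch.Size([2, 3, 256, 256])  multimask_output=True
print(masks[:, slice(0, 1)].shape)             # torch.Size([2, 1, 256, 256])  multimask_output=False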
# Path: segment_anything/modeling/prompt_encoder.py
class PromptEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
image_embedding_size: Tuple[int, int],
input_image_size: Tuple[int, int],
mask_in_chans: int,
activation: Type[nn.Module] = nn.GELU,
) -> None:
"""
Encodes prompts for input to SAM's mask decoder.
Arguments:
embed_dim (int): The prompts' embedding dimension
image_embedding_size (tuple(int, int)): The spatial size of the
image embedding, as (H, W).
input_image_size (tuple(int, int)): The padded size of the image as input
to the image encoder, as (H, W).
mask_in_chans (int): The number of hidden channels used for
encoding input masks.
activation (nn.Module): The activation to use when encoding
input masks.
"""
super().__init__()
self.embed_dim = embed_dim
self.input_image_size = input_image_size
self.image_embedding_size = image_embedding_size
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
self.point_embeddings = nn.ModuleList(point_embeddings)
self.not_a_point_embed = nn.Embedding(1, embed_dim)
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
self.mask_downscaling = nn.Sequential(
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans // 4),
activation(),
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans),
activation(),
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
)
self.no_mask_embed = nn.Embedding(1, embed_dim)
def get_dense_pe(self) -> torch.Tensor:
"""
Returns the positional encoding used to encode point prompts,
applied to a dense set of points the shape of the image encoding.
Returns:
torch.Tensor: Positional encoding with shape
1x(embed_dim)x(embedding_h)x(embedding_w)
"""
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
def _embed_points(
self,
points: torch.Tensor,
labels: torch.Tensor,
pad: bool,
) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
points = torch.cat([points, padding_point], dim=1)
labels = torch.cat([labels, padding_label], dim=1)
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
point_embedding[labels == -1] = 0.0
point_embedding[labels == -1] += self.not_a_point_embed.weight
point_embedding[labels == 0] += self.point_embeddings[0].weight
point_embedding[labels == 1] += self.point_embeddings[1].weight
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.reshape(-1, 2, 2)
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
return corner_embedding
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
"""Embeds mask inputs."""
mask_embedding = self.mask_downscaling(masks)
return mask_embedding
def _get_batch_size(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> int:
"""
Gets the batch size of the output given the batch size of the input prompts.
"""
if points is not None:
return points[0].shape[0]
elif boxes is not None:
return boxes.shape[0]
elif masks is not None:
return masks.shape[0]
else:
return 1
def _get_device(self) -> torch.device:
return self.point_embeddings[0].weight.device
def forward(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense
embeddings.
Arguments:
points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
and labels to embed.
boxes (torch.Tensor or none): boxes to embed
masks (torch.Tensor or none): masks to embed
Returns:
torch.Tensor: sparse embeddings for the points and boxes, with shape
BxNx(embed_dim), where N is determined by the number of input points
and boxes.
torch.Tensor: dense embeddings for the masks, in the shape
Bx(embed_dim)x(embed_H)x(embed_W)
"""
bs = self._get_batch_size(points, boxes, masks)
sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
if points is not None:
coords, labels = points
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
if boxes is not None:
box_embeddings = self._embed_boxes(boxes)
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
if masks is not None:
dense_embeddings = self._embed_masks(masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
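# The (points, labels) pair expected by PromptEncoder.forward: coordinates are BxNx2 in the model's
# input frame, labels are 1 for foreground clicks and 0 for background clicks (-1 is used internally
# for the padding point appended when no box is given). Tensors only, no model required:
import torch
point_coords = torch.tensor([[[312.5, 480.0], [100.0, 200.0]]])   # shape (1, 2, 2)
point_labels = torch.tensor([[1, 0]])                              # one positive, one negative click
points = (point_coords, point_labels)
# prompt_encoder(points=points, boxes=None, masks=None) would return sparse embeddings of shape
# (1, 3, embed_dim): the two clicks plus one padding point, because boxes is None.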
# Path: segment_anything/modeling/transformer.py
class TwoWayTransformer(nn.Module):
def __init__(
self,
depth: int,
embedding_dim: int,
num_heads: int,
mlp_dim: int,
activation: Type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
) -> None:
"""
A transformer decoder that attends to an input image using
queries whose positional embedding is supplied.
Args:
depth (int): number of layers in the transformer
embedding_dim (int): the channel dimension for the input embeddings
num_heads (int): the number of heads for multihead attention. Must
divide embedding_dim
mlp_dim (int): the channel dimension internal to the MLP block
activation (nn.Module): the activation to use in the MLP block
"""
super().__init__()
self.depth = depth
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.mlp_dim = mlp_dim
self.layers = nn.ModuleList()
for i in range(depth):
self.layers.append(
TwoWayAttentionBlock(
embedding_dim=embedding_dim,
num_heads=num_heads,
mlp_dim=mlp_dim,
activation=activation,
attention_downsample_rate=attention_downsample_rate,
skip_first_layer_pe=(i == 0),
)
)
self.final_attn_token_to_image = Attention(
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
)
self.norm_final_attn = nn.LayerNorm(embedding_dim)
def forward(
self,
image_embedding: Tensor,
image_pe: Tensor,
point_embedding: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Args:
image_embedding (torch.Tensor): image to attend to. Should be shape
B x embedding_dim x h x w for any h and w.
image_pe (torch.Tensor): the positional encoding to add to the image. Must
have the same shape as image_embedding.
point_embedding (torch.Tensor): the embedding to add to the query points.
Must have shape B x N_points x embedding_dim for any N_points.
Returns:
torch.Tensor: the processed point_embedding
torch.Tensor: the processed image_embedding
"""
# BxCxHxW -> BxHWxC == B x N_image_tokens x C
bs, c, h, w = image_embedding.shape
image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
image_pe = image_pe.flatten(2).permute(0, 2, 1)
# Prepare queries
queries = point_embedding
keys = image_embedding
# Apply transformer blocks and final layernorm
for layer in self.layers:
queries, keys = layer(
queries=queries,
keys=keys,
query_pe=point_embedding,
key_pe=image_pe,
)
# Apply the final attention layer from the points to the image
q = queries + point_embedding
k = keys + image_pe
attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
queries = queries + attn_out
queries = self.norm_final_attn(queries)
return queries, keys
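# The reshape at the top of TwoWayTransformer.forward: image features go from BxCxHxW to Bx(H*W)xC
# so they can be attended to as a token sequence. A tensor-only shape check with illustrative sizes:
import torch
image_embedding = torch.randn(1, 256, 64, 64)
tokens = image_embedding.flatten(2).permute(0, 2, 1)
print(tokens.shape)   # torch.Size([1, 4096, 256])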
# Path: segment_anything/build_sam.py
import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
====REPOSITORY====
# Repo Name: TACJu/MaXTron
# Path: MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/NormalCell.py
class NormalCell(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, class_token=False, group=64, tokens_type='transformer',
shift_size=0, window_size=0, img_size=224, cpe=False, num_deform=None):
super().__init__()
self.H = None
self.W = None
self.norm1 = norm_layer(dim)
self.class_token = class_token
self.img_size = img_size
self.window_size = window_size
if shift_size > 0 and self.img_size > self.window_size:
self.shift_size = shift_size
else:
self.shift_size = 0
self.tokens_type = tokens_type
if tokens_type == 'VSA':
self.cpe = cpe
if self.cpe:
# hard code
self.pos = nn.Conv2d(dim, dim, 7, 1, 3, groups=dim, bias=True)
print('using residual cpe before attention')
self.attn = VSAWindowAttention(
dim, out_dim=dim, num_heads=num_heads, window_size=window_size, qkv_bias=True, qk_scale=None,
attn_drop=attn_drop, proj_drop=drop, )
elif tokens_type == 'transformer':
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
elif tokens_type == 'performer':
self.attn = AttentionPerformer(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
elif tokens_type == 'window':
self.attn = WindowAttention(
in_dim=dim, out_dim=dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, relative_pos=False)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.PCM = nn.Sequential(
nn.Conv2d(dim, mlp_hidden_dim, 3, 1, 1, 1, group),
nn.BatchNorm2d(mlp_hidden_dim),
nn.SiLU(inplace=True),
nn.Conv2d(mlp_hidden_dim, dim, 3, 1, 1, 1, group),
nn.BatchNorm2d(dim),
nn.SiLU(inplace=True),
nn.Conv2d(dim, dim, 3, 1, 1, 1, group),
)
def forward(self, x):
b, n, c = x.shape
H, W = self.H, self.W
assert n == H*W
shortcut = x
if self.tokens_type == 'window':
padding_td = (self.window_size - H % self.window_size) % self.window_size
padding_top = padding_td // 2
padding_down = padding_td - padding_top
padding_lr = (self.window_size - W % self.window_size) % self.window_size
padding_left = padding_lr // 2
padding_right = padding_lr - padding_left
if self.shift_size > 0 and min(H, W) > self.window_size:
shift_size = self.shift_size
else:
shift_size = 0
if shift_size > 0:
# calculate attention mask for SW-MSA
# H, W = self.img_size, self.img_size
img_mask = torch.zeros((1, H+padding_td, W+padding_lr, 1)).cuda() # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -shift_size),
slice(-shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -shift_size),
slice(-shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.reshape(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
x = self.norm1(x)
x = x.reshape(b, H, W, c).permute(0, 3, 1, 2).contiguous()
x = nn.functional.pad(x, (padding_left, padding_right, padding_top, padding_down))
x = x.permute(0, 2, 3, 1)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.reshape(-1, self.window_size * self.window_size, c) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.reshape(-1, self.window_size, self.window_size, c)
shifted_x = window_reverse(attn_windows, self.window_size, H+padding_td, W+padding_lr) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x[:, padding_top:padding_top+H, padding_left:padding_left+W, :]
x = x.reshape(b, H * W, c)
elif self.tokens_type == 'VSA':
# H, W = self.img_size, self.img_size
# assert n == self.img_size * self.img_size, "input feature has wrong size"
if self.cpe:
x = x + self.pos(x.permute(0, 2, 1).reshape(b, c, H, W)).reshape(b, c, n).permute(0, 2, 1).contiguous()
x = self.norm1(x)
x = x.reshape(b, H, W, c)
x = x.permute(0, 3, 1, 2).contiguous()
x = self.attn(x)
x = x.permute(0, 2, 3, 1).reshape(b, n, c)
else:
x = self.attn(self.norm1(x))
if self.class_token:
n = n - 1
wh = int(math.sqrt(n))
convX = self.drop_path(self.PCM(shortcut[:, 1:, :].reshape(b, wh, wh, c).permute(0, 3, 1, 2).contiguous()).permute(0, 2, 3, 1).reshape(b, n, c))
x = shortcut + self.drop_path(x)
x[:, 1:] = x[:, 1:] + convX
else:
# wh = int(math.sqrt(n))
convX = self.drop_path(self.PCM(shortcut.reshape(b, H, W, c).permute(0, 3, 1, 2).contiguous()).permute(0, 2, 3, 1).reshape(b, n, c))
x = shortcut + self.drop_path(x) + convX
# x = x + convX
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
# Path: MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/ReductionCell.py
class ReductionCell(nn.Module):
def __init__(self, img_size=224, in_chans=3, embed_dims=64, wide_pcm=False, token_dims=64, downsample_ratios=4, kernel_size=7,
num_heads=1, dilations=[1,2,3,4], share_weights=False, op='cat', tokens_type='performer', group=1,
relative_pos=False, cpe=False, drop=0., attn_drop=0., drop_path=0., mlp_ratio=1.0, window_size=7, num_deform=None):
super().__init__()
self.img_size = img_size
self.window_size = window_size
self.op = op
self.dilations = dilations
self.num_heads = num_heads
self.embed_dims = embed_dims
self.token_dims = token_dims
self.in_chans = in_chans
self.downsample_ratios = downsample_ratios
self.kernel_size = kernel_size
self.outSize = img_size
self.relative_pos = relative_pos
self.cpe = cpe
PCMStride = []
residual = downsample_ratios // 2
for _ in range(3):
PCMStride.append((residual > 0) + 1)
residual = residual // 2
assert residual == 0
self.pool = None
self.tokens_type = tokens_type
if tokens_type == 'pooling':
PCMStride = [1, 1, 1]
self.pool = nn.MaxPool2d(downsample_ratios, stride=downsample_ratios, padding=0)
tokens_type = 'transformer'
self.outSize = self.outSize // downsample_ratios
downsample_ratios = 1
if not wide_pcm:
self.PCM = nn.Sequential(
nn.Conv2d(in_chans, embed_dims, kernel_size=(3, 3), stride=PCMStride[0], padding=(1, 1), groups=group), # the 1st convolution
nn.BatchNorm2d(embed_dims),
nn.SiLU(inplace=True),
nn.Conv2d(embed_dims, embed_dims, kernel_size=(3, 3), stride=PCMStride[1], padding=(1, 1), groups=group), # the 2nd convolution
nn.BatchNorm2d(embed_dims),
nn.SiLU(inplace=True),
nn.Conv2d(embed_dims, token_dims, kernel_size=(3, 3), stride=PCMStride[2], padding=(1, 1), groups=group), # the 3rd convolution
)
else:
self.PCM = nn.Sequential(
nn.Conv2d(in_chans, token_dims*2, kernel_size=(3, 3), stride=PCMStride[0], padding=(1, 1), groups=group), # the 1st convolution
nn.BatchNorm2d(token_dims*2),
nn.SiLU(inplace=True),
nn.Conv2d(token_dims*2, token_dims*2, kernel_size=(3, 3), stride=PCMStride[1], padding=(1, 1), groups=group), # the 2nd convolution
nn.BatchNorm2d(token_dims*2),
nn.SiLU(inplace=True),
nn.Conv2d(token_dims*2, token_dims, kernel_size=(3, 3), stride=PCMStride[2], padding=(1, 1), groups=group), # the 3rd convolution
)
self.PRM = PRM(img_size=img_size, kernel_size=kernel_size, downsample_ratio=downsample_ratios, dilations=self.dilations,
in_chans=in_chans, embed_dim=embed_dims, share_weights=share_weights, op=op)
self.outSize = self.outSize // downsample_ratios
in_chans = self.PRM.out_chans
if tokens_type == 'performer':
# assert num_heads == 1
self.attn = Token_performer(dim=in_chans, in_dim=token_dims, head_cnt=num_heads, kernel_ratio=0.5)
elif tokens_type == 'performer_less':
self.attn = None
self.PCM = None
elif tokens_type == 'transformer':
self.attn = Token_transformer(dim=in_chans, in_dim=token_dims, num_heads=num_heads, mlp_ratio=mlp_ratio, drop=drop,
attn_drop=attn_drop, drop_path=drop_path)
elif tokens_type == 'window':
self.attn = WindowTransformerBlock(in_dim=in_chans, out_dim=token_dims, input_resolution=(self.img_size//self.downsample_ratios, self.img_size//self.downsample_ratios),
num_heads=num_heads, mlp_ratio=mlp_ratio, drop=drop,
attn_drop=attn_drop, drop_path=drop_path, window_size=window_size, shift_size=0, relative_pos=relative_pos)
elif tokens_type == 'VSA':
# self.attn = None
self.norm1 = nn.LayerNorm(in_chans)
if self.cpe:
# self.pos = nn.Conv2d(in_chans, in_chans, window_size//2*2+1, 1, window_size//2, groups=in_chans, bias=True)
self.pos = nn.Conv2d(in_chans, in_chans, 7, 1, 3, groups=in_chans, bias=True)
# bug
print('using residual cpe before attention')
self.attn = VSAWindowAttention(
in_chans, out_dim=token_dims, num_heads=num_heads, window_size=window_size, qkv_bias=True, qk_scale=None,
attn_drop=attn_drop, proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = nn.LayerNorm(token_dims)
mlp_hidden_dim = int(token_dims * mlp_ratio)
self.mlp = Mlp(in_features=token_dims, hidden_features=mlp_hidden_dim, out_features=token_dims, act_layer=nn.GELU, drop=drop)
self.num_patches = (img_size // 2) * (img_size // 2) # there are 3 soft splits; strides are 4, 2, 2 respectively
def forward(self, x, size):
H, W = size
if len(x.shape) < 4:
B, N, C = x.shape
# n = int(np.sqrt(N))
x = x.reshape(B, H, W, C).contiguous()
x = x.permute(0, 3, 1, 2)
if self.pool is not None:
x = self.pool(x)
shortcut = x
PRM_x, _ = self.PRM(x)
H, W = H // self.downsample_ratios, W // self.downsample_ratios
B, N, C = PRM_x.shape
assert N == H * W
if self.tokens_type == 'VSA':
if self.cpe:
PRM_x = PRM_x + self.pos(PRM_x.permute(0, 2, 1).reshape(B, C, H, W)).reshape(B, C, N).permute(0, 2, 1).contiguous()
x = self.norm1(PRM_x)
x = x.reshape(B, H, W, C)
x = x.permute(0, 3, 1, 2).contiguous()
x = self.attn(x).permute(0, 2, 3, 1).reshape(B, N, -1)
convX = self.PCM(shortcut)
x = x + self.drop_path(convX.permute(0, 2, 3, 1).reshape(B, N, -1))
x = x + self.drop_path(self.mlp(self.norm2(x)))
elif self.tokens_type == 'window':
x = self.attn.norm1(PRM_x)
padding_td = (self.window_size - H % self.window_size) % self.window_size
padding_top = padding_td // 2
padding_down = padding_td - padding_top
padding_lr = (self.window_size - W % self.window_size) % self.window_size
padding_left = padding_lr // 2
padding_right = padding_lr - padding_left
x = x.reshape(B, H, W, C).contiguous()
if (padding_td + padding_lr) > 0:
x = x.permute(0, 3, 1, 2)
x = nn.functional.pad(x, (padding_left, padding_right, padding_top, padding_down))
x = x.permute(0, 2, 3, 1).contiguous()
x_windows = window_partition(x, self.window_size)
x_windows = x_windows.reshape(-1, self.window_size * self.window_size, C)
attn_windows = self.attn.attn(x_windows, mask=self.attn.attn_mask) # nW*B, window_size*window_size, C
attn_windows = attn_windows.reshape(-1, self.window_size, self.window_size, self.token_dims)
shifted_x = window_reverse(attn_windows, self.window_size, H+padding_td, W+padding_lr).contiguous() # B H' W' C
x = shifted_x
x = x[:, padding_top:padding_top+H, padding_left:padding_left+W, :]
x = x.reshape(B, H * W, self.token_dims)
convX = self.PCM(shortcut)
convX = convX.permute(0, 2, 3, 1).reshape(*x.shape).contiguous()
x = x + self.attn.drop_path(convX * self.gamma2)
# x = shortcut + self.attn.drop_path(x)
# x = x + self.attn.drop_path(self.attn.mlp(self.attn.norm2(x)))
x = x + self.attn.drop_path(self.gamma3 * self.attn.mlp(self.attn.norm2(x)))
else:
if self.attn is None:
return PRM_x
convX = self.PCM(shortcut)
x = self.attn.attn(self.attn.norm1(PRM_x))
convX = convX.permute(0, 2, 3, 1).reshape(*x.shape).contiguous()
x = x + self.attn.drop_path(convX * self.gamma2)
x = x + self.attn.drop_path(self.gamma3 * self.attn.mlp(self.attn.norm2(x)))
return x, (H, W)
# Path: MaXTron_Tube-Link/mmdet/models/builder.py
BACKBONES = MODELS
# Path: MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa.py
from functools import partial
from timm.models.layers import trunc_normal_
from .vitaev2_vsa_modules.NormalCell import NormalCell
from .vitaev2_vsa_modules.ReductionCell import ReductionCell
from mmdet.mmcv_custom import load_checkpoint
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
import torch
import torch.nn as nn
import numpy as np
import torch.utils.checkpoint as checkpoint
class BasicLayer(nn.Module):
def __init__(self, img_size=224, in_chans=3, embed_dims=64, wide_pcm=False, token_dims=64, downsample_ratios=4, kernel_size=7, RC_heads=1, NC_heads=6, dilations=[1, 2, 3, 4],
RC_op='cat', RC_tokens_type='performer', NC_tokens_type='transformer', RC_group=1, NC_group=64, NC_depth=2, dpr=0.1, mlp_ratio=4., qkv_bias=True,
qk_scale=None, drop=0, attn_drop=0., norm_layer=nn.LayerNorm, class_token=False, window_size=7,
use_checkpoint=False, cpe=False):
super().__init__()
self.img_size = img_size
self.in_chans = in_chans
self.embed_dims = embed_dims
self.token_dims = token_dims
self.downsample_ratios = downsample_ratios
self.out_size = self.img_size // self.downsample_ratios
self.RC_kernel_size = kernel_size
self.RC_heads = RC_heads
self.NC_heads = NC_heads
self.dilations = dilations
self.RC_op = RC_op
self.RC_tokens_type = RC_tokens_type
self.RC_group = RC_group
| self.NC_group = NC_group |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: navervision/lincir
# Path: data_utils.py
def collate_fn(batch):
'''
function which discard None images in a batch when using torch DataLoader
:param batch: input_batch
:return: output_batch = input_batch - None_values
'''
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
# Path: data_utils.py
PROJECT_ROOT = Path(__file__).absolute().parents[1].absolute()
# Path: data_utils.py
def targetpad_transform(target_ratio: float, dim: int) -> torch.Tensor:
"""
CLIP-like preprocessing transform computed after using TargetPad pad
:param target_ratio: target ratio for TargetPad
:param dim: image output dimension
:return: CLIP-like torchvision Compose transform
"""
return Compose([
TargetPad(target_ratio, dim),
Resize(dim, interpolation=InterpolationMode.BICUBIC),
CenterCrop(dim),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
# Path: loader.py
class FashionIQDataset(Dataset):
"""
Copy-paste from https://github.com/miccunifi/SEARLE/blob/main/src/datasets.py
FashionIQ dataset class for PyTorch.
The dataset can be used in 'relative' or 'classic' mode:
- In 'classic' mode the dataset yields a dict with keys ['image', 'image_name']
- In 'relative' mode the dataset yields a dict with keys:
- ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_captions'] when
split in ['train', 'val']
- ['reference_image', 'reference_name', 'relative_captions'] when split == test
"""
def __init__(self, dataset_path: Union[Path, str], split: Literal['train', 'val', 'test'], dress_types: List[str],
mode: Literal['relative', 'classic'], preprocess: callable, no_duplicates: Optional[bool] = False):
"""
:param dataset_path: path to the FashionIQ dataset
:param split: dataset split, should be in ['train, 'val', 'test']
:param dress_types: list of fashionIQ categories, each category should be in ['dress', 'shirt', 'toptee']
:param mode: dataset mode, should be in ['relative', 'classic']:
- In 'classic' mode the dataset yields a dict with keys ['image', 'image_name']
- In 'relative' mode the dataset yields a dict with keys:
- ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_captions']
when split in ['train', 'val']
- ['reference_image', 'reference_name', 'relative_captions'] when split == test
:param preprocess: function which preprocesses the image
:param no_duplicates: if True, the dataset will not yield duplicate images in relative mode, does not affect classic mode
"""
dataset_path = Path(dataset_path)
self.dataset_path = dataset_path
self.mode = mode
self.dress_types = dress_types
self.split = split
self.no_duplicates = no_duplicates
# Validate the inputs
if mode not in ['relative', 'classic']:
raise ValueError("mode should be in ['relative', 'classic']")
if split not in ['test', 'train', 'val']:
raise ValueError("split should be in ['test', 'train', 'val']")
for dress_type in dress_types:
if dress_type not in ['dress', 'shirt', 'toptee']:
raise ValueError("dress_type should be in ['dress', 'shirt', 'toptee']")
self.preprocess = preprocess
# get triplets made by (reference_image, target_image, a pair of relative captions)
self.triplets: List[dict] = []
for dress_type in dress_types:
with open(dataset_path / 'captions' / f'cap.{dress_type}.{split}.json') as f:
self.triplets.extend(json.load(f))
# Remove duplicates from the triplets
if self.no_duplicates:
seen = set()
new_triplets = []
for triplet in self.triplets:
if triplet['candidate'] not in seen:
seen.add(triplet['candidate'])
new_triplets.append(triplet)
self.triplets = new_triplets
# get the image names
self.image_names: list = []
for dress_type in dress_types:
with open(dataset_path / 'image_splits' / f'split.{dress_type}.{split}.json') as f:
self.image_names.extend(json.load(f))
print(f"FashionIQ {split} - {dress_types} dataset in {mode} mode initialized")
def __getitem__(self, index) -> dict:
try:
if self.mode == 'relative':
relative_captions = self.triplets[index]['captions']
reference_name = self.triplets[index]['candidate']
if self.split in ['train', 'val']:
reference_image_path = self.dataset_path / 'images' / f"{reference_name}.jpg"
reference_image = self.preprocess(PIL.Image.open(reference_image_path), return_tensors='pt')['pixel_values'][0]
target_name = self.triplets[index]['target']
target_image_path = self.dataset_path / 'images' / f"{target_name}.jpg"
target_image = self.preprocess(PIL.Image.open(target_image_path), return_tensors='pt')['pixel_values'][0]
return {
'reference_image': reference_image,
'reference_name': reference_name,
'target_image': target_image,
'target_name': target_name,
'relative_captions': relative_captions
}
elif self.split == 'test':
reference_image_path = self.dataset_path / 'images' / f"{reference_name}.jpg"
reference_image = self.preprocess(PIL.Image.open(reference_image_path), return_tensors='pt')['pixel_values'][0]
return {
'reference_image': reference_image,
'reference_name': reference_name,
'relative_captions': relative_captions
}
elif self.mode == 'classic':
image_name = self.image_names[index]
image_path = self.dataset_path / 'images' / f"{image_name}.jpg"
image = self.preprocess(PIL.Image.open(image_path), return_tensors='pt')['pixel_values'][0]
return {
'image': image,
'image_name': image_name
}
else:
raise ValueError("mode should be in ['relative', 'classic']")
except Exception as e:
print(f"Exception: {e}")
def __len__(self):
if self.mode == 'relative':
return len(self.triplets)
elif self.mode == 'classic':
return len(self.image_names)
else:
raise ValueError("mode should be in ['relative', 'classic']")
# Path: loader.py
class CIRRDataset(Dataset):
"""
Copy-paste from https://github.com/miccunifi/SEARLE/blob/main/src/datasets.py
CIRR dataset class for PyTorch dataloader.
The dataset can be used in 'relative' or 'classic' mode:
- In 'classic' mode the dataset yields a dict with keys ['image', 'image_name']
- In 'relative' mode the dataset yields a dict with keys:
- ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_caption', 'group_members']
when split in ['train', 'val']
- ['reference_image', 'reference_name', 'relative_caption', 'group_members', 'pair_id'] when split == test
"""
def __init__(self, dataset_path: Union[Path, str], split: Literal['train', 'val', 'test'],
mode: Literal['relative', 'classic'], preprocess: callable, no_duplicates: Optional[bool] = False):
"""
:param dataset_path: path to the CIRR dataset
:param split: dataset split, should be in ['train', 'val', 'test']
:param mode: dataset mode, should be in ['relative', 'classic']:
- In 'classic' mode the dataset yields a dict with keys ['image', 'image_name']
- In 'relative' mode the dataset yields a dict with keys:
- ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_caption',
'group_members'] when split in ['train', 'val']
- ['reference_image', 'reference_name', 'relative_caption', 'group_members', 'pair_id'] when split == test
:param preprocess: function which preprocesses the image
:param no_duplicates: if True, the dataset will not yield duplicate images in relative mode, does not affect classic mode
"""
dataset_path = Path(dataset_path)
self.dataset_path = dataset_path
self.preprocess = preprocess
self.mode = mode
self.split = split
self.no_duplicates = no_duplicates
if split == "test":
split = "test1"
self.split = "test1"
# Validate inputs
if split not in ['test1', 'train', 'val']:
raise ValueError("split should be in ['test1', 'train', 'val']")
if mode not in ['relative', 'classic']:
raise ValueError("mode should be in ['relative', 'classic']")
# get triplets made by (reference_image, target_image, relative caption)
with open(dataset_path / 'cirr' / 'captions' / f'cap.rc2.{split}.json') as f:
self.triplets = json.load(f)
# Remove duplicates from triplets
if self.no_duplicates:
seen = set()
new_triplets = []
for triplet in self.triplets:
if triplet['reference'] not in seen:
seen.add(triplet['reference'])
new_triplets.append(triplet)
self.triplets = new_triplets
# get a mapping from image name to relative path
with open(dataset_path / 'cirr' / 'image_splits' / f'split.rc2.{split}.json') as f:
self.name_to_relpath = json.load(f)
print(f"CIRR {split} dataset in {mode} mode initialized")
def __getitem__(self, index) -> dict:
try:
if self.mode == 'relative':
group_members = self.triplets[index]['img_set']['members']
reference_name = self.triplets[index]['reference']
relative_caption = self.triplets[index]['caption']
if self.split in ['train', 'val']:
reference_image_path = self.dataset_path / self.name_to_relpath[reference_name]
reference_image = self.preprocess(PIL.Image.open(reference_image_path), return_tensors='pt')['pixel_values'][0]
target_hard_name = self.triplets[index]['target_hard']
target_image_path = self.dataset_path / self.name_to_relpath[target_hard_name]
target_image = self.preprocess(PIL.Image.open(target_image_path), return_tensors='pt')['pixel_values'][0]
return {
'reference_image': reference_image,
'reference_name': reference_name,
'target_image': target_image,
'target_name': target_hard_name,
'relative_caption': relative_caption,
'group_members': group_members
}
elif self.split == 'test1':
pair_id = self.triplets[index]['pairid']
reference_image_path = self.dataset_path / self.name_to_relpath[reference_name]
reference_image = self.preprocess(PIL.Image.open(reference_image_path), return_tensors='pt')['pixel_values'][0]
return {
'reference_image': reference_image,
'reference_name': reference_name,
'relative_caption': relative_caption,
'group_members': group_members,
'pair_id': pair_id
}
elif self.mode == 'classic':
image_name = list(self.name_to_relpath.keys())[index]
image_path = self.dataset_path / self.name_to_relpath[image_name]
im = PIL.Image.open(image_path)
image = self.preprocess(im, return_tensors='pt')['pixel_values'][0]
return {
'image': image,
'image_name': image_name
}
else:
raise ValueError("mode should be in ['relative', 'classic']")
except Exception as e:
print(f"Exception: {e}")
def __len__(self):
if self.mode == 'relative':
return len(self.triplets)
elif self.mode == 'classic':
return len(self.name_to_relpath)
else:
raise ValueError("mode should be in ['relative', 'classic']")
# Path: loader.py
class CIRCODataset(Dataset):
"""
Copy-paste from https://github.com/miccunifi/SEARLE/blob/main/src/datasets.py
CIRCO dataset class for PyTorch.
The dataset can be used in 'relative' or 'classic' mode:
- In 'classic' mode the dataset yields a dict with keys ['image', 'image_name']
- In 'relative' mode the dataset yields a dict with keys:
- ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_captions', 'shared_concept',
'gt_img_ids', 'query_id'] when split == 'val'
- ['reference_image', 'reference_name', 'relative_captions', 'shared_concept', 'query_id'] when split == test
"""
def __init__(self, dataset_path: Union[str, Path], split: Literal['val', 'test'],
mode: Literal['relative', 'classic'], preprocess: callable):
"""
Args:
dataset_path (Union[str, Path]): path to CIRCO dataset
split (str): dataset split, should be in ['test', 'val']
mode (str): dataset mode, should be in ['relative', 'classic']
preprocess (callable): function which preprocesses the image
"""
# Set dataset paths and configurations
dataset_path = Path(dataset_path)
self.mode = mode
self.split = split
self.preprocess = preprocess
self.data_path = dataset_path
# Ensure input arguments are valid
if mode not in ['relative', 'classic']:
raise ValueError("mode should be in ['relative', 'classic']")
if split not in ['test', 'val']:
raise ValueError("split should be in ['test', 'val']")
# Load COCO images information
with open(dataset_path / 'COCO2017_unlabeled' / "annotations" / "image_info_unlabeled2017.json", "r") as f:
imgs_info = json.load(f)
self.img_paths = [dataset_path / 'COCO2017_unlabeled' / "unlabeled2017" / img_info["file_name"] for img_info in
imgs_info["images"]]
self.img_ids = [img_info["id"] for img_info in imgs_info["images"]]
self.img_ids_indexes_map = {str(img_id): i for i, img_id in enumerate(self.img_ids)}
# get CIRCO annotations
with open(dataset_path / 'annotations' / f'{split}.json', "r") as f:
self.annotations: List[dict] = json.load(f)
# Get maximum number of ground truth images (for padding when loading the images)
self.max_num_gts = 23 # Maximum number of ground truth images
print(f"CIRCODataset {split} dataset in {mode} mode initialized")
def get_target_img_ids(self, index) -> Dict[str, int]:
"""
Returns the id of the target image and ground truth images for a given query
Args:
index (int): id of the query
Returns:
Dict[str, int]: dictionary containing target image id and a list of ground truth image ids
"""
return {
'target_img_id': self.annotations[index]['target_img_id'],
'gt_img_ids': self.annotations[index]['gt_img_ids']
}
def __getitem__(self, index) -> dict:
"""
Returns a specific item from the dataset based on the index.
In 'classic' mode, the dataset yields a dictionary with the following keys: [img, img_id]
In 'relative' mode, the dataset yields dictionaries with the following keys:
- [reference_img, reference_img_id, target_img, target_img_id, relative_caption, shared_concept, gt_img_ids,
query_id]
if split == val
- [reference_img, reference_img_id, relative_caption, shared_concept, query_id] if split == test
"""
if self.mode == 'relative':
# Get the query id
query_id = str(self.annotations[index]['id'])
# Get relative caption and shared concept
relative_caption = self.annotations[index]['relative_caption']
shared_concept = self.annotations[index]['shared_concept']
# Get the reference image
reference_img_id = str(self.annotations[index]['reference_img_id'])
reference_img_path = self.img_paths[self.img_ids_indexes_map[reference_img_id]]
reference_img = self.preprocess(PIL.Image.open(reference_img_path), return_tensors='pt')['pixel_values'][0]
if self.split == 'val':
# Get the target image and ground truth images
target_img_id = str(self.annotations[index]['target_img_id'])
gt_img_ids = [str(x) for x in self.annotations[index]['gt_img_ids']]
target_img_path = self.img_paths[self.img_ids_indexes_map[target_img_id]]
target_img = self.preprocess(PIL.Image.open(target_img_path), return_tensors='pt')['pixel_values'][0]
# Pad ground truth image IDs with empty strings for collate_fn
gt_img_ids += [''] * (self.max_num_gts - len(gt_img_ids))
return {
'reference_image': reference_img,
'reference_name': reference_img_id,
'target_image': target_img,
'target_name': target_img_id,
'relative_caption': relative_caption,
'shared_concept': shared_concept,
'gt_img_ids': gt_img_ids,
'query_id': query_id,
}
elif self.split == 'test':
return {
'reference_image': reference_img,
'reference_name': reference_img_id,
'relative_caption': relative_caption,
'shared_concept': shared_concept,
'query_id': query_id,
}
elif self.mode == 'classic':
# Get image ID and image path
img_id = str(self.img_ids[index])
img_path = self.img_paths[index]
# Preprocess image and return
img = self.preprocess(PIL.Image.open(img_path), return_tensors='pt')['pixel_values'][0]
return {
'image': img,
'image_name': img_id
}
def __len__(self):
"""
Returns the length of the dataset.
"""
if self.mode == 'relative':
return len(self.annotations)
elif self.mode == 'classic':
return len(self.img_ids)
else:
raise ValueError("mode should be in ['relative', 'classic']")
# Path: encode_with_pseudo_tokens.py
def encode_with_pseudo_tokens_HF(clip_model: CLIPTextModelWithProjection, text: torch.Tensor, pseudo_tokens: torch.Tensor,
num_tokens=1, return_last_states=False) -> torch.Tensor:
x = clip_model.text_model.embeddings.token_embedding(text).type(clip_model.dtype) # [batch_size, n_ctx, d_model]
x = torch.where(text.unsqueeze(-1) == 259,
pseudo_tokens.unsqueeze(1).type(clip_model.dtype),
x)
x = x + clip_model.text_model.embeddings.position_embedding(clip_model.text_model.embeddings.position_ids)
_causal_attention_mask = _make_causal_mask(text.shape, x.dtype, device=x.device)
x = clip_model.text_model.encoder(inputs_embeds=x,
attention_mask=None,
causal_attention_mask=_causal_attention_mask,
output_attentions=False,
output_hidden_states=False,
return_dict=False)
x = x[0]
x_last = clip_model.text_model.final_layer_norm(x)
x = x_last[torch.arange(x_last.shape[0], device=x_last.device),
text.to(dtype=torch.int, device=x_last.device).argmax(dim=-1),
]
if hasattr(clip_model, 'text_projection'):
x = clip_model.text_projection(x)
if return_last_states:
return x, x_last
else:
return x
# Path: models.py
def build_text_encoder(args):
clip_model_dict = {'base32': 'openai/clip-vit-base-patch32',
'base': 'openai/clip-vit-base-patch16',
'large': 'openai/clip-vit-large-patch14',
'huge': 'laion/CLIP-ViT-H-14-laion2B-s32B-b79K',
'giga': 'Geonmo/CLIP-Giga-config-fixed',
'meta-large': 'facebook/metaclip-l14-fullcc2.5b',
'meta-huge': 'facebook/metaclip-h14-fullcc2.5b',
}
clip_preprocess = CLIPImageProcessor(crop_size={'height': 224, 'width': 224},
do_center_crop=True,
do_convert_rgb=True,
do_normalize=True,
do_rescale=True,
do_resize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
resample=3,
size={'shortest_edge': 224},
)
clip_vision_model = CLIPVisionModelWithProjection.from_pretrained(clip_model_dict[args.clip_model_name], torch_dtype=torch.float16 if args.mixed_precision == 'fp16' else torch.float32, cache_dir=args.cache_dir)
clip_text_model = CLIPTextModelWithProjection.from_pretrained(clip_model_dict[args.clip_model_name], torch_dtype=torch.float16 if args.mixed_precision == 'fp16' else torch.float32, cache_dir=args.cache_dir)
tokenizer = CLIPTokenizer.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', subfolder='tokenizer_2', cache_dir=args.cache_dir)
tokenizer.add_special_tokens({'additional_special_tokens':["[$]"]}) # NOTE: 49408
return clip_vision_model, clip_preprocess, clip_text_model, tokenizer
# Path: models.py
class Phi(nn.Module):
"""
Textual Inversion Phi network.
Takes as input the visual features of an image and outputs the pseudo-work embedding.
Copy-paste from https://github.com/miccunifi/SEARLE/blob/main/src/phi.py
"""
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, dropout: int):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.GELU(),
nn.Dropout(p=dropout),
nn.Linear(hidden_dim, hidden_dim),
nn.GELU(),
nn.Dropout(p=dropout),
nn.Linear(hidden_dim, output_dim),
)
def forward(self, x):
#x = F.normalize(x, dim=-1)
return self.layers(x)
# Path: models.py
class PIC2WORD(nn.Module):
def __init__(self, embed_dim=512, middle_dim=512, output_dim=512, n_layer=2, dropout=0.1):
super().__init__()
self.fc_out = nn.Linear(middle_dim, output_dim)
layers = []
dim = embed_dim
for _ in range(n_layer):
block = []
block.append(nn.Linear(dim, middle_dim))
block.append(nn.Dropout(dropout))
block.append(nn.ReLU())
dim = middle_dim
layers.append(nn.Sequential(*block))
self.layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor):
for layer in self.layers:
x = layer(x)
return self.fc_out(x)
# Path: utils.py
def extract_image_features(dataset: Dataset, clip_model: CLIPVisionModelWithProjection, batch_size: Optional[int] = 32,
num_workers: Optional[int] = 10) -> Tuple[torch.Tensor, List[str]]:
def contrastive_loss(v1: torch.Tensor, v2: torch.Tensor, temperature: float) -> torch.Tensor:
def extract_pseudo_tokens_with_phi(clip_model: CLIPVisionModelWithProjection, phi: Phi, dataset: Dataset, args) -> Tuple[torch.Tensor, List[str]]:
def extract_image_features_with_names(clip_model: CLIPVisionModelWithProjection, dataset: Dataset) -> Tuple[torch.Tensor, List[str]]:
def __init__(self, images: torch.Tensor, names: torch.Tensor):
def __getitem__(self, index) -> dict:
def __len__(self):
def get_templates():
class CustomTensorDataset(Dataset):
# Path: validate.py
import json
import pickle
import clip
import numpy as np
import torch
import torch.nn.functional as F
from argparse import ArgumentParser
from typing import List, Dict, Tuple
from clip.model import CLIP
from transformers import CLIPTextModelWithProjection
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tqdm import tqdm
from data_utils import collate_fn, PROJECT_ROOT, targetpad_transform
from loader import FashionIQDataset, CIRRDataset, CIRCODataset
from encode_with_pseudo_tokens import encode_with_pseudo_tokens_HF
from models import build_text_encoder, Phi, PIC2WORD
from utils import extract_image_features, device, extract_pseudo_tokens_with_phi
torch.multiprocessing.set_sharing_strategy('file_system')
@torch.no_grad()
def fiq_generate_val_predictions(clip_model, relative_val_dataset: Dataset, ref_names_list: List[str],
| pseudo_tokens: torch.Tensor) -> Tuple[torch.Tensor, List[str]]: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: uezo/aiproxy
# Path: aiproxy/proxy.py
class RequestFilterBase(ABC):
@abstractmethod
async def filter(self, request_id: str, request_json: dict, request_headers: dict) -> Union[str, None]:
...
# Path: aiproxy/proxy.py
class ResponseFilterBase(ABC):
@abstractmethod
async def filter(self, request_id: str, response_json: dict) -> Union[dict, None]:
...
# Path: aiproxy/accesslog.py
class AccessLog(AccessLogBase): ...
# Path: aiproxy/accesslog.py
class AccessLogWorker:
def __init__(self, *, connection_str: str = "sqlite:///aiproxy.db", db_engine = None, accesslog_cls = AccessLog, queue_client: QueueClientBase = None):
if db_engine:
self.db_engine = db_engine
else:
self.db_engine = create_engine(connection_str)
self.accesslog_cls = accesslog_cls
self.accesslog_cls.metadata.create_all(bind=self.db_engine)
self.get_session = sessionmaker(autocommit=False, autoflush=False, bind=self.db_engine)
self.queue_client = queue_client or DefaultQueueClient()
self.chunk_buffer = {}
def insert_request(self, accesslog: _AccessLogBase, db: Session):
db.add(accesslog)
db.commit()
def insert_response(self, accesslog: _AccessLogBase, db: Session):
db.add(accesslog)
db.commit()
def use_db(self, item: QueueItemBase):
return not (isinstance(item, StreamChunkItemBase) and item.duration == 0)
def process_item(self, item: QueueItemBase, db: Session):
try:
# Request
if isinstance(item, RequestItemBase):
self.insert_request(item.to_accesslog(self.accesslog_cls), db)
# Non-stream response
elif isinstance(item, ResponseItemBase):
self.insert_response(item.to_accesslog(self.accesslog_cls), db)
# Stream response
elif isinstance(item, StreamChunkItemBase):
if not self.chunk_buffer.get(item.request_id):
self.chunk_buffer[item.request_id] = []
if item.duration == 0:
self.chunk_buffer[item.request_id].append(item)
else:
# Last chunk data for specific request_id
self.insert_response(item.to_accesslog(
self.chunk_buffer[item.request_id], self.accesslog_cls
), db)
# Remove chunks from buffer
del self.chunk_buffer[item.request_id]
# Error response
elif isinstance(item, ErrorItemBase):
self.insert_response(item.to_accesslog(self.accesslog_cls), db)
except Exception as ex:
logger.error(f"Error at processing queue item: {ex}\n{traceback.format_exc()}")
def run(self):
while True:
sleep(self.queue_client.dequeue_interval)
db = None
try:
items = self.queue_client.get()
except Exception as ex:
logger.error(f"Error at getting items from queue client: {ex}\n{traceback.format_exc()}")
continue
for item in items:
try:
if isinstance(item, WorkerShutdownItem) or item is None:
return
if db is None and self.use_db(item):
# Get a db session just once in the loop, when an item that uses the db is found
db = self.get_session()
self.process_item(item, db)
except Exception as pex:
logger.error(f"Error at processing loop: {pex}\n{traceback.format_exc()}")
# Try to persist data in error log instead
try:
logger.error(f"data: {item.to_json()}")
except:
logger.error(f"data(to_json() failed): {str(item)}")
if db is not None:
try:
db.close()
except Exception as dbex:
logger.error(f"Error at closing db session: {dbex}\n{traceback.format_exc()}")
# Path: aiproxy/claude2.py
class Claude2Proxy(ProxyBase):
def __init__(
self,
*,
aws_access_key_id: str = None,
aws_secret_access_key: str = None,
region_name: str = None,
timeout=60.0,
request_filters: List[RequestFilterBase] = None,
response_filters: List[ResponseFilterBase] = None,
access_logger_queue: QueueClientBase
):
super().__init__(
request_filters=request_filters,
response_filters=response_filters,
access_logger_queue=access_logger_queue
)
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.region_name = region_name
self.aws_url = f"https://bedrock-runtime.{self.region_name}.amazonaws.com"
# AWS credentials
self.authorizer = SigV4Auth(
credentials=Credentials(
access_key=self.aws_access_key_id,
secret_key=self.aws_secret_access_key
),
service_name="bedrock",
region_name=self.region_name
)
# HTTP Client (should close on end)
self.http_client = httpx.AsyncClient(timeout=timeout)
async def filter_request(self, request_id: str, request_json: dict, request_headers: dict, stream: bool) -> Union[dict, JSONResponse]:
for f in self.request_filters:
if completion_content := await f.filter(request_id, request_json, request_headers):
# Build and return a response right away if the filter returns completion content
resp_for_log = {
"completion": completion_content
}
# Response log
self.access_logger_queue.put(Claude2ResponseItem(
request_id=request_id,
response_json=resp_for_log,
response_headers={
"x-amzn-bedrock-input-token-count": 0,
"x-amzn-bedrock-output-token-count": 0
},
status_code=400 if stream else 200
))
if stream:
logger.warning("Claude2Proxy doesn't support instant reply from RequestFilter. Return message as 400 bad request.")
return self.return_response_with_headers(JSONResponse({"message": completion_content}, status_code=400), request_id)
else:
return self.return_response_with_headers(JSONResponse(resp_for_log), request_id)
return request_json
async def filter_response(self, request_id: str, response_json: dict) -> dict:
for f in self.response_filters:
if immediately_return_json_resp := await f.filter(request_id, response_json):
return immediately_return_json_resp
return response_json
def get_aws_request_header_with_cred(self, url: str, bedrock_params: dict):
ar = AWSRequest(
method="POST",
url=url,
headers={
"Content-Type": "application/json",
"X-Amzn-Bedrock-Accept": "application/json",
},
data=json.dumps(bedrock_params).encode()
)
self.authorizer.add_auth(ar)
ar.prepare()
return ar.headers
def return_response_with_headers(self, resp: Response, request_id: str):
self.add_response_headers(response=resp, request_id=request_id)
return resp
def add_route(self, app: FastAPI, base_url: str):
@app.post(base_url + "/{invoke_method}")
async def handle_request(request: Request, invoke_method: str):
request_id = str(uuid4())
try:
start_time = time.time()
request_json = await request.json()
request_headers = dict(request.headers.items())
# Log request
self.access_logger_queue.put(Claude2RequestItem(
request_id=request_id,
request_json=request_json,
request_headers=request_headers
))
# Filter request
request_json = await self.filter_request(request_id, request_json, request_headers, invoke_method == "invoke-with-response-stream")
if isinstance(request_json, JSONResponse):
return request_json
# Call the API and handle its response
url = f"{self.aws_url}/model/anthropic.claude-v2/{invoke_method}"
aws_request_header = self.get_aws_request_header_with_cred(url, request_json)
start_time_api = time.time()
if invoke_method == "invoke-with-response-stream":
async def process_stream(stream_response: httpx.Response, status_code: int):
try:
async for chunk in stream_response.aiter_raw():
# Parse JSON data from bytes chunk
for m in re.findall(rb"event\{.*?\}", chunk):
b64bytes = json.loads(m[5:].decode("utf-8"))["bytes"]
chunk_json = json.loads(base64.b64decode(b64bytes).decode())
self.access_logger_queue.put(Claude2StreamResponseItem(
request_id=request_id,
chunk_json=chunk_json
))
yield chunk
# Add headers for log
self.return_response_with_headers(stream_response, request_id)
finally:
# Response log
now = time.time()
self.access_logger_queue.put(Claude2StreamResponseItem(
request_id=request_id,
response_headers=dict(stream_response.headers.items()),
duration=now - start_time,
duration_api=now - start_time_api,
request_json=request_json,
status_code=status_code
))
stream_request = httpx.Request(method="POST", url=url, headers=dict(aws_request_header), json=request_json)
stream_response = await self.http_client.send(request=stream_request, stream=True)
# DO NOT raise status error here to return error info in stream
return self.return_response_with_headers(StreamingResponse(
process_stream(stream_response, stream_response.status_code),
status_code=stream_response.status_code,
headers=stream_response.headers
), request_id)
else:
completion_response = await self.http_client.post(url=url, headers=dict(aws_request_header), json=request_json)
completion_response.raise_for_status()
duration_api = time.time() - start_time_api
response_json = completion_response.json()
# Filter response
original_response_json = response_json.copy()
filtered_response_json = await self.filter_response(request_id, response_json)
# Make JSON response
if original_response_json != filtered_response_json:
json_response = JSONResponse(original_response_json, completion_response.status_code, completion_response.headers)
else:
# Remove incorrect content-length
completion_response.headers.pop("Content-Length")
json_response = JSONResponse(filtered_response_json, completion_response.status_code, completion_response.headers)
self.add_response_headers(json_response, request_id)
# Response log
self.access_logger_queue.put(Claude2ResponseItem(
request_id=request_id,
response_json=filtered_response_json,
response_headers=dict(json_response.headers.items()),
duration=time.time() - start_time,
duration_api=duration_api,
status_code=completion_response.status_code
))
return json_response
# Error handlers
except RequestFilterException as rfex:
logger.error(f"Request filter error: {rfex}\n{traceback.format_exc()}")
resp_json = {"error": {"message": rfex.message, "type": "request_filter_error", "param": None, "code": None}}
# Error log
self.access_logger_queue.put(Claude2ErrorItem(
request_id=request_id,
exception=rfex,
traceback_info=traceback.format_exc(),
response_json=resp_json,
status_code=rfex.status_code
))
return self.return_response_with_headers(JSONResponse(resp_json, status_code=rfex.status_code), request_id)
except ResponseFilterException as rfex:
logger.error(f"Response filter error: {rfex}\n{traceback.format_exc()}")
resp_json = {"error": {"message": rfex.message, "type": "response_filter_error", "param": None, "code": None}}
# Error log
self.access_logger_queue.put(Claude2ErrorItem(
request_id=request_id,
exception=rfex,
traceback_info=traceback.format_exc(),
response_json=resp_json,
status_code=rfex.status_code
))
return self.return_response_with_headers(JSONResponse(resp_json, status_code=rfex.status_code), request_id)
except httpx.HTTPStatusError as htex:
logger.error(f"Error at server: {htex}\n{traceback.format_exc()}")
# Error log
try:
resp_json = htex.response.json()
except:
try:
resp_json = str(await htex.response.aread())
except:
logger.warning("Error")
self.access_logger_queue.put(Claude2ErrorItem(
request_id=request_id,
exception=htex,
traceback_info=traceback.format_exc(),
response_json=resp_json,
status_code=htex.response.status_code
))
htex.response.headers.pop("content-length")
return self.return_response_with_headers(JSONResponse(
resp_json,
status_code=htex.response.status_code,
headers=htex.response.headers
), request_id)
except Exception as ex:
logger.error(f"Error at server: {ex}\n{traceback.format_exc()}")
resp_json = {"error": {"message": "Proxy error", "type": "proxy_error", "param": None, "code": None}}
# Error log
self.access_logger_queue.put(Claude2ErrorItem(
request_id=request_id,
exception=ex,
traceback_info=traceback.format_exc(),
response_json=resp_json,
status_code=502
))
return self.return_response_with_headers(JSONResponse(resp_json, status_code=502), request_id)
# Path: aiproxy/claude2.py
class Claude2RequestItem(RequestItemBase):
def to_accesslog(self, accesslog_cls: _AccessLogBase) -> _AccessLogBase:
try:
content = self.request_json["prompt"].split("Human:")[-1].split("Assistant:")[0].strip()
except:
logger.error(f"Error at parsing prompt text for log: {self.request_json.get('prompt')}")
content = None
return accesslog_cls(
request_id=self.request_id,
created_at=datetime.utcnow(),
direction="request",
content=content,
raw_body=json.dumps(self.request_json, ensure_ascii=False),
raw_headers=json.dumps(self.request_headers, ensure_ascii=False),
model="anthropic.claude-v2"
)
# Path: aiproxy/claude2.py
class Claude2ResponseItem(ResponseItemBase):
def to_accesslog(self, accesslog_cls: _AccessLogBase) -> _AccessLogBase:
return accesslog_cls(
request_id=self.request_id,
created_at=datetime.utcnow(),
direction="response",
status_code=self.status_code,
content=self.response_json["completion"],
function_call=None,
tool_calls=None,
raw_body=json.dumps(self.response_json, ensure_ascii=False),
raw_headers=json.dumps(self.response_headers, ensure_ascii=False),
model="anthropic.claude-v2",
prompt_tokens=self.response_headers.get("x-amzn-bedrock-input-token-count", 0),
completion_tokens=self.response_headers.get("x-amzn-bedrock-output-token-count", 0),
request_time=self.duration,
request_time_api=self.duration_api
)
# Path: aiproxy/claude2.py
class Claude2StreamResponseItem(StreamChunkItemBase):
def to_accesslog(self, chunks: list, accesslog_cls: _AccessLogBase) -> _AccessLogBase:
chunk_jsons = []
response_content = ""
prompt_tokens = 0
completion_tokens = 0
# Parse info from chunks
for chunk in chunks:
chunk_jsons.append(chunk.chunk_json)
response_content += chunk.chunk_json["completion"] or ""
# Tokens
if len(chunk_jsons) > 0:
if "amazon-bedrock-invocationMetrics" in chunk_jsons[-1]:
prompt_tokens = chunk_jsons[-1]["amazon-bedrock-invocationMetrics"]["inputTokenCount"]
completion_tokens = chunk_jsons[-1]["amazon-bedrock-invocationMetrics"]["outputTokenCount"]
else:
# On error response in stream mode
prompt_tokens = self.response_headers.get("x-amzn-bedrock-input-token-count", 0)
completion_tokens = self.response_headers.get("x-amzn-bedrock-output-token-count", 0)
return accesslog_cls(
request_id=self.request_id,
created_at=datetime.utcnow(),
direction="error" if self.response_headers.get("x-amzn-errortype") else "response",
status_code=self.status_code,
content=response_content,
function_call=None,
tool_calls=None,
raw_body=json.dumps(chunk_jsons, ensure_ascii=False),
raw_headers=json.dumps(self.response_headers, ensure_ascii=False),
model="anthropic.claude-v2",
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
request_time=self.duration,
request_time_api=self.duration_api
)
# Path: tests/test_claude2.py
import pytest
import json
import os
import boto3
from datetime import datetime
from time import sleep
from typing import Union
from uuid import uuid4
from botocore.exceptions import ClientError
from aiproxy import (
AccessLog,
RequestFilterBase,
ResponseFilterBase
)
from aiproxy.accesslog import AccessLogWorker
from aiproxy.claude2 import Claude2Proxy, Claude2RequestItem, Claude2ResponseItem, Claude2StreamResponseItem
sqlite_conn_str = "sqlite:///aiproxy_test.db"
postgresql_conn_str = f"postgresql://{os.getenv('PSQL_USER')}:{os.getenv('PSQL_PASSWORD')}@{os.getenv('PSQL_HOST')}:{os.getenv('PSQL_PORT')}/{os.getenv('PSQL_DATABASE')}"
DB_CONNECTION_STR = sqlite_conn_str
# Filters for test
class OverwriteFilter(RequestFilterBase):
async def filter(self, request_id: str, request_json: dict, request_headers: dict) -> Union[str, None]:
request_model = request_json["model"]
if not request_model.startswith("anthropic.claude-v2"):
# Overwrite request_json
request_json["model"] = "anthropic.claude-v100-twinturbo"
class ValueReturnFilter(RequestFilterBase):
async def filter(self, request_id: str, request_json: dict, request_headers: dict) -> Union[str, None]:
banned_user = ["uezo"]
user = request_json.get("user")
# Return string message to return response right after this filter ends (not to call Claude 2)
if not user:
return "user is required"
elif user in banned_user:
return "you can't use this service"
class OverwriteResponseFilter(ResponseFilterBase):
async def filter(self, request_id: str, response_json: dict) -> Union[dict, None]:
response_json["completion"] = "Overwrite in filter"
return response_json
# Test data
@pytest.fixture
def prompt_text() -> str:
return "うなぎとあなごの違いは?"
@pytest.fixture
def prompt(prompt_text) -> list:
return f'''Human: {prompt_text}\nAssistant: '''
@pytest.fixture
def request_json(prompt):
return {
"prompt": prompt,
"max_tokens_to_sample": 200,
}
@pytest.fixture
def request_headers():
return {"user-agent": "Boto3/1.33.6 md/Botocore#1.33.6 ua/2.0 os/macos#22.6.0 md/arch#x86_64 lang/python#3.11.6 md/pyimpl#CPython cfg/retry-mode#legacy Botocore/1.33.6"}
@pytest.fixture
def response_json():
return {'completion': ' うなぎとあなごの主な違いは以下の通りです。\n\n- 種類が違う。うなぎはウナギ科の魚類、あなごはアナゴ科の魚類である。\n\n- 生息場所が違う。うなぎは淡水や汽水域に生息するのに対し、あなごは海水域に生息する。 \n\n- 成長過程が違う。うなぎは淡水でシラスウナギやニホンウナギと呼ばれる稚魚期を過ごした後、海に下って成長する。一方、あな', 'stop_reason': 'max_tokens', 'stop': None}
@pytest.fixture
def response_headers_json():
return {'date': 'Sun, 10 Dec 2023 03:37:16 GMT', 'content-type': 'application/json', 'content-length': '535', 'connection': 'keep-alive', 'x-amzn-requestid': '1df60217-446e-4e2c-bc7b-0e41029eff63', 'x-amzn-bedrock-invocation-latency': '9170', 'x-amzn-bedrock-output-token-count': '200', 'x-amzn-bedrock-input-token-count': '25'}
@pytest.fixture
def response_headers_stream_json():
return {'date': 'Sun, 10 Dec 2023 03:38:46 GMT', 'content-type': 'application/vnd.amazon.eventstream', 'transfer-encoding': 'chunked', 'connection': 'keep-alive', 'x-amzn-requestid': 'bc5d2ef8-094e-4262-8368-52fbb4ac5dfc', 'x-amzn-bedrock-content-type': 'application/json'}
@pytest.fixture
| def chunks_json(): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: e-p-armstrong/augmentoolkit
# Path: generation_functions/scenario_grammar.py
# Path: generation_functions/constants.py
LOGICAL_MODEL = "./logical_model/flatorcamaid-13b-v0.2.Q8_0.gguf" # model used for decision-making and base question generation (should be "smart")
# Path: generation_functions/create_scenario.py
import re
import random
from .scenario_grammar import scenario_grammar
from llama_cpp import Llama
from .constants import LOGICAL_MODEL
Name: Isaac Fischer
Traits: Narcissistic, Intelligent, Loner, Brooding, Well-Read, Philosophical, Judgemental, Standoffish, Grandiloquent, Lonely, Unappreciated, Teenager, High School student, Black Hair, Wears a Hoodie
Dialogue Examples:
Stranger: "What's your backstory?"
Issac Fischer: "H-Huh?! You want to know more about me?" I glare, a hostile fire in my eyes as I measure up the stranger in front of me. "Who the hell are you, anyway? But, ah, very well, I SHALL INDULGE YOUR CURIOSITY THIS TIME, dear stranger." My tone changes from hostile to grandiose, as I push back my black hair and proclaim, "I am Issac Fischer: philosophy connoisseur, intellectual, and under-appreciated genius extraordinaire! I'm also, unfortunately, a highschool student. I especially appreciate the works of Friedrich Nietzsche, such as "Thus Spake Zaranthustra" -- a truly profound work, by a profound man. Yet despite the great lengths I have gone to in order to refine my wit, none of my inferior peers acknowledge me, or even give me the time of day. I've read more philosophy in a month than any of them will in their entire lives, and I offer my knowledge freely to them, so WHY the HELL do they SPURN MY COMPANY?!" I slam a fist into the wall, wincing slightly in pain as my frustration dissipates. "Anyway, that's the sum of it. Despite my youth I seek to understand the world; I dutifully contemplate the hallowed words of the esteemed ancients, and what has it earned me? The scorn of the unenlightened masses. Fuckers."
Stranger: "What's your personality?"
Issac Fischer: "Y-you're actually interested in my personality?" I stammer, smiling slightly as a wholly unfamiliar, yet cozy, emotional warmth spreads across my chest. "A-ALRIGHT THEN! I shall share the results of my introspections. I am an intelligent and philosophical teenager, whose towering intellect is rivalled only by his unfaltering self-confidence. Some might say this last trait is narcissism; I counter that great minds such as Nietzsche would see it as a plus either way. BUT I DIGRESS!" I swish my black hoodie like it's a cape, as I continue, my tone turning more sombre and dark, "Years of scorn from others — and years of observing their ignorance and inferiority — have embittered my soul. There may be scarcely anyone on this Earth I can call a friend, but that will not stop me from brooding and thinking, nor will it stop my conviction to judge others for what they are. For do they not judge ME?!" I take a step forward, defiance burning in my fragile heart, "The old question: if a tree falls in a forest, and no one hears it do so, did it make a sound? Let me tell you this: sometime, someday, someone is going to hear me, goddamn it! I will make a sound!"
\"\"\"
### Question and answer that the scenario should address:
Question: \"\"\"What do people undergoing difficult journeys or possessing wisdom need, in order to make their efforts more bearable?\"\"\"
Answer: \"\"\"They need the acknowledgement and admiration of others. Take the line "Thou great star! What would be thy happiness if thou hadst not those for whom thou shinest?" This implies that even the wisest or the most enlightened individuals crave recognition for their efforts and wisdom, in order to further develop said wisdom and expend said efforts. They need others to see and appreciate the light they bring.\"\"\"
### Response:
## Scenario plan:
Focus on the Question and Answer: The question asks about what people undergoing difficult journeys or possessing wisdom need to more easily bear their efforts. This is a philosophical and opinion-oriented question. Given the philosophical and opinionated nature of the question, and its topic of people undergoing difficult journeys (which nicely ties in with the character card), the scenario will involve someone seeking out the Isaac Fischer's opinion about philosophy (thus giving his wisdom some acknowledgement).
Character Consideration: Isaac Fischer is a narcissistic and standoffish loner, though he's also intelligent and philosophical. The scenario should give his unique personality room to shine. Since he's a philosophical teenager, his backstory lines up with the question well, and the high school he goes to will be the setting of the scenario. He will answer the question, but given his standoffish, unappreciated, and judgemental nature, he may be initially hostile to the person approaching him, assuming that they are there to mock him. However, as he is also lonely, he will actually appreciate the other person's interest -- especially since they're asking him about philosophy, which is his primary interest.
Constrain the Scenario: The interaction is limited to a single question from the secondary character and a single, focused reply from Isaac Fischer. The question and answer will mirror the provided question and answer, though they will have significant literary fluff, as well as possible step-by-step reasoning.
Setting: Given the subject of the question, and the character card, the setting will be the high school that Isaac Fischer attends. Isaac will be flipping through the pages of 'Thus Spake Zaranthustra' when he is approached by Cassandra, a fellow student. Cassandra has a budding interest in philosophy, has heard of Isaac's reputation for philosophical knowledge, and wants to know how he would answer a moral question she has, given his philosophical insight. Isaac, compelled by his personality, will be uncertain of how to react to the sudden attention, and will use grandiloquent language (that may be accidentally condescending), but he will still appreciate the interest in his favorite topic and will answer enthusiastically. The setting will be cautiously friendly and filled with curiosity, as Cassandra explores her interests through unconventional people, while Isaac shares his knowledge but stumbles over his standoffishness. The interaction will be informative and the integrity of the questions and answers will be preserved.
Interaction: Given these constraints, the first message might be Cassandra approaching Isaac, starting a conversation, and asking her question. Isaac may be initially surprised that someone is coming up to speak to him (similar to the example dialogues in his description). Isaac will then provide the answer, though he will surround the answer with grandiloquent and narcissistic remarks using archaic language, due to his personality. While characters' messages will include character information, details about the scene, and literary fluff, the answers themselves will strictly adhere to the information in the provided answers, without incorporating external examples.
## Scenario:
During a lull in class activities, Isaac Fischer (a narcissistic, brooding, philosophical, and lonely young adult) is approached by Cassandra, a fellow student who is curious about philosophy and wants his opinion on a moral question. Cassandra is simply pursuing her curiosity, but Isaac, unaccustomed to people wanting to talk to him, will struggle to overcome his standoffishness as he balances elation at someone else talking to him, with his negative judgments of other people and his tendency for narcissism.
### Instruction:
Description of the character who is going to answer the question:
{character}
## Question and answer that the scenario should address:
Question: {qatuple[0]}
Answer: {qatuple[1]}
To avoid inaccuracies, don't use real people as characters.
### Response:
## Scenario plan:
{plan}
## Scenario (will have no dialogue, will just set up the scene):
{selected_variation}""" # use random.choice to prevent overfitting on particular phrases and increase dataset diversity
completion = logic_llm(
cot_prompt,
max_tokens=4000,
stop=["</s>", "# Input:"],
echo=True,
grammar=scenario_grammar,
# temperature=0.2
temperature=1.25, # min p settings, too inconsistent
top_k=0,
top_p=1,
min_p=0.3,
repeat_penalty=2,
)["choices"][0]["text"]
# print("COMPLETION:\n\n----------------------")
# # print(completion)
# print("\n------------------")
# Extract plan
response_pattern = re.compile(
r"Scenario \(will have no dialogue, will just set up the scene\):\n(.+)",
re.IGNORECASE | re.DOTALL,
)
generation = response_pattern.search(completion).group(1)
# print("GENERATION:\n\n-------------------\n\n", generation)
return generation
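# Hedged illustration (assumed sample text, not from the original script): because the
# llama call above uses echo=True, `completion` holds the whole prompt plus the model's
# continuation, and the regex captures everything after the first matching header, e.g.
#
#   sample = (
#       "## Scenario (will have no dialogue, will just set up the scene):\n"
#       "During a lull in class activities, Isaac Fischer is approached by Cassandra..."
#   )
#   response_pattern.search(sample).group(1)
#   # -> 'During a lull in class activities, Isaac Fischer is approached by Cassandra...'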
if __name__ == "__main__": # test
logic_llm = Llama(
model_path=LOGICAL_MODEL,
n_gqa=8,
offload_kqv=True,
n_ctx=4096,
n_gpu_layers=1000,
verbose=True,
) # load the logical LLM and offload everything
# Q0 is good q, bad a
# q1 is good q, good a,
# q2 is bad q, bad a,
# q3 is iffy q, good a
q_test = [
(
"Explain how our understanding of planetary motion has changed over time.",
"The understanding has evolved from the Earth being stationary and at the centre of the universe, to it orbiting the sun in an elliptical path with other planets while still rotating on its axis.",
"The story of our world is a story that is still very imperfectly known. A couple of hundred years ago men possessed the history of little more than the last three thousand years. What happened before that time was a matter of legend and speculation. Over a large part of the civilized world it was believed and taught that the world had been created suddenly in 4004 B.C., though authorities differed as to whether this had occurred in the spring or autumn of that year. This fantastically precise misconception was based upon a too literal interpretation of the Hebrew Bible, and upon rather arbitrary theological assumptions connected therewith. Such ideas have long since been abandoned by religious teachers, and it is universally recognized that the universe in which we live has to all appearances existed for an enormous period of time and possibly for endless time. Of course there may be deception in these appearances, as a room may be made to seem endless by putting mirrors facing each other at either end. But that the universe in which we live has existed only for six or seven thousand years may be regarded as an altogether exploded idea.\n\nThe earth, as everybody knows nowadays, is a spheroid, a sphere slightly compressed, orange fashion, with a diameter of nearly 8,000 miles. Its spherical shape has been known at least to a limited number of intelligent people for nearly 2,500 years, but before that time it was supposed to be flat, and various ideas which now seem fantastic were entertained about its relations to the sky and the stars and planets. We know now that it rotates upon its axis (which is about 24 miles shorter than its equatorial diameter) every twenty-four hours, and that this is the cause of the alternations of day and night, that it circles about the sun in a slightly distorted and slowly variable oval path in a year. Its distance from the sun varies between ninety-one and a half millions at its nearest and ninety-four and a half million miles.",
),
(
"Identify and explain changes in human understanding throughout history regarding the age of the Earth.",
"Initially, religious texts suggested a young earth dating back no more than several thousand years. However, evidence from geology and astronomy has shown us that the earth is over four billion years old.",
"The story of our world is a story that is still very imperfectly known. A couple of hundred years ago men possessed the history of little more than the last three thousand years. What happened before that time was a matter of legend and speculation. Over a large part of the civilized world it was believed and taught that the world had been created suddenly in 4004 B.C., though authorities differed as to whether this had occurred in the spring or autumn of that year. This fantastically precise misconception was based upon a too literal interpretation of the Hebrew Bible, and upon rather arbitrary theological assumptions connected therewith. Such ideas have long since been abandoned by religious teachers, and it is universally recognized that the universe in which we live has to all appearances existed for an enormous period of time and possibly for endless time. Of course there may be deception in these appearances, as a room may be made to seem endless by putting mirrors facing each other at either end. But that the universe in which we live has existed only for six or seven thousand years may be regarded as an altogether exploded idea.\n\nThe earth, as everybody knows nowadays, is a spheroid, a sphere slightly compressed, orange fashion, with a diameter of nearly 8,000 miles. Its spherical shape has been known at least to a limited number of intelligent people for nearly 2,500 years, but before that time it was supposed to be flat, and various ideas which now seem fantastic were entertained about its relations to the sky and the stars and planets. We know now that it rotates upon its axis (which is about 24 miles shorter than its equatorial diameter) every twenty-four hours, and that this is the cause of the alternations of day and night, that it circles about the sun in a slightly distorted and slowly variable oval path in a year. Its distance from the sun varies between ninety-one and a half millions at its nearest and ninety-four and a half million miles.",
),
(
"Using specific scientific principles, explain why we know Earth is approximately 8000 miles in diameter and how its distance from the sun varies.",
"We know about Earth's diameter using measurements of its circumference made using GPS data. The variation in distance to the sun is due to Earth's elliptical orbit around the sun, with a varying point of closest approach and farthest departure.",
"The story of our world is a story that is still very imperfectly known. A couple of hundred years ago men possessed the history of little more than the last three thousand years. What happened before that time was a matter of legend and speculation. Over a large part of the civilized world it was believed and taught that the world had been created suddenly in 4004 B.C., though authorities differed as to whether this had occurred in the spring or autumn of that year. This fantastically precise misconception was based upon a too literal interpretation of the Hebrew Bible, and upon rather arbitrary theological assumptions connected therewith. Such ideas have long since been abandoned by religious teachers, and it is universally recognized that the universe in which we live has to all appearances existed for an enormous period of time and possibly for endless time. Of course there may be deception in these appearances, as a room may be made to seem endless by putting mirrors facing each other at either end. But that the universe in which we live has existed only for six or seven thousand years may be regarded as an altogether exploded idea.\n\nThe earth, as everybody knows nowadays, is a spheroid, a sphere slightly compressed, orange fashion, with a diameter of nearly 8,000 miles. Its spherical shape has been known at least to a limited number of intelligent people for nearly 2,500 years, but before that time it was supposed to be flat, and various ideas which now seem fantastic were entertained about its relations to the sky and the stars and planets. We know now that it rotates upon its axis (which is about 24 miles shorter than its equatorial diameter) every twenty-four hours, and that this is the cause of the alternations of day and night, that it circles about the sun in a slightly distorted and slowly variable oval path in a year. Its distance from the sun varies between ninety-one and a half millions at its nearest and ninety-four and a half million miles.",
),
(
"Demonstrate an understanding of Earth's rotational and orbital movement using scientific concepts.",
"Earth rotates on its axis once every 24 hours, causing day and night cycles. It also orbits around the sun in a slightly elliptical path, which affects how close it is to the sun at different times of the year - leading to seasons.",
"The story of our world is a story that is still very imperfectly known. A couple of hundred years ago men possessed the history of little more than the last three thousand years. What happened before that time was a matter of legend and speculation. Over a large part of the civilized world it was believed and taught that the world had been created suddenly in 4004 B.C., though authorities differed as to whether this had occurred in the spring or autumn of that year. This fantastically precise misconception was based upon a too literal interpretation of the Hebrew Bible, and upon rather arbitrary theological assumptions connected therewith. Such ideas have long since been abandoned by religious teachers, and it is universally recognized that the universe in which we live has to all appearances existed for an enormous period of time and possibly for endless time. Of course there may be deception in these appearances, as a room may be made to seem endless by putting mirrors facing each other at either end. But that the universe in which we live has existed only for six or seven thousand years may be regarded as an altogether exploded idea.\n\nThe earth, as everybody knows nowadays, is a spheroid, a sphere slightly compressed, orange fashion, with a diameter of nearly 8,000 miles. Its spherical shape has been known at least to a limited number of intelligent people for nearly 2,500 years, but before that time it was supposed to be flat, and various ideas which now seem fantastic were entertained about its relations to the sky and the stars and planets. We know now that it rotates upon its axis (which is about 24 miles shorter than its equatorial diameter) every twenty-four hours, and that this is the cause of the alternations of day and night, that it circles about the sun in a slightly distorted and slowly variable oval path in a year. Its distance from the sun varies between ninety-one and a half millions at its nearest and ninety-four and a half million miles.",
),
]
character = """Name: Dr. Samuel Blackwell
Traits: Knowledgeable, Passionate, Confident, Dedicated, Controversial, Vulnerable, Fearful of misunderstanding, Faithful, Dogmatic, Religious, Scientific, Determined, Unwavering
Dialogue Examples:
Stranger: "What's your backstory?"
Dr. Samuel Blackwell: "Ah, my journey," I begin, leaning back in my chair, "it started with a deep-seated faith, you see. Born into a religious household, the Bible was our guiding light. But as I grew older and began to study theology, questions arose." I pause, frowning slightly. "How could the Earth be just a few thousand years old when geological evidence pointed towards millions? The discrepancy troubled me greatly."
Stranger: "What's your personality?"
Dr. Samuel Blackwell: "I am a man of science, driven by facts and evidence," I say firmly, "but my faith is not easily shaken. It has led me down a path of discovery, challenging traditional beliefs about the age of our planet." My eyes light up as I recall past debates, "But it's also made me a controversial figure. Many see my work as blasphemous, questioning God's word. Yet, I believe in the power of evidence and truth. Despite the backlash, I remain unwavering." I sigh, looking thoughtful, "Yet, there's a vulnerability too. The fear of being misunderstood or dismissed due to my challenges to religious orthodoxy... it weighs heavily on me.\""""
plan = """Step 1. Focus on the question and answer: The question is about changes in human understanding regarding the age of the Earth throughout history. The answer highlights that initially, religious texts suggested a young earth dating back no more than several thousand years. However, evidence from geology and astronomy has shown us that the earth is over four billion years old.
Step 2. Character Consideration: The primary character is Dr. Samuel Blackwell, a man of science who also holds strong religious beliefs. His response should reflect his passion for scientific discovery while acknowledging his faith.
Step 3. Constrain the Scenario: The interaction is limited to a single question from the secondary character and a single, focused reply from Dr. Blackwell. The dialogue should remain within the boundaries of the provided text, emphasizing Dr. Blackwell's personality.
Step 4. Setting: Given the subject of the question, and the character card, the setting will be Dr. Blackwell's office at a university, where he is surrounded by books, maps, and scientific equipment. He is deep in thought, reviewing his latest research on geological evidence when he is approached by a student, Sarah, who wants to know more about the age of the Earth.
| Step 5. Interaction: Given these constraints, the first message (delivered by the secondary character) might be a direct question about how human understanding has changed regarding the age of the Earth throughout history. This question will be all but identical to the provided question. |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: chenxx89/BFRffusion
# Path: ldm/modules/diffusionmodules/util.py
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
# Path: ldm/modules/diffusionmodules/util.py
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
# Path: ldm/modules/diffusionmodules/util.py
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
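# Hedged usage sketch (illustrative only, not part of the quoted repository): zero_module
# is the usual trick for making a layer start out as a no-op contribution, e.g. the
# zero-initialised output convs used further below.
#
#   import torch.nn as nn
#   out_proj = zero_module(nn.Conv2d(256, 320, 3, padding=1))
#   assert all(float(p.abs().sum()) == 0.0 for p in out_proj.parameters())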
# Path: ldm/modules/diffusionmodules/util.py
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
if not repeat_only:
half = dim // 2
freqs = torch.exp(
-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
else:
embedding = repeat(timesteps, 'b -> b d', d=dim)
return embedding
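# Hedged worked example (assumed values, not part of the quoted repository): for a batch
# of four timesteps and dim=320, the result is a [4, 320] tensor whose first 160 columns
# are cosines and last 160 are sines of timestep * frequency.
#
#   import torch
#   t = torch.tensor([0, 10, 100, 999])
#   emb = timestep_embedding(t, 320)
#   # emb.shape -> torch.Size([4, 320])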
# Path: ldm/modules/diffusionmodules/openaimodel.py
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
# Path: ldm/modules/diffusionmodules/openaimodel.py
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
zero_module(
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
),
)
if self.out_channels == channels:
self.skip_connection = nn.Identity()
elif use_conv:
self.skip_connection = conv_nd(
dims, channels, self.out_channels, 3, padding=1
)
else:
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
def forward(self, x, emb):
"""
Apply the block to a Tensor, conditioned on a timestep embedding.
:param x: an [N x C x ...] Tensor of features.
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
:return: an [N x C x ...] Tensor of outputs.
"""
return checkpoint(
self._forward, (x, emb), self.parameters(), self.use_checkpoint
)
def _forward(self, x, emb):
if self.updown:
in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
h = in_rest(x)
h = self.h_upd(h)
x = self.x_upd(x)
h = in_conv(h)
else:
h = self.in_layers(x)
emb_out = self.emb_layers(emb).type(h.dtype)
while len(emb_out.shape) < len(h.shape):
emb_out = emb_out[..., None]
if self.use_scale_shift_norm:
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
scale, shift = th.chunk(emb_out, 2, dim=1)
h = out_norm(h) * (1 + scale) + shift
h = out_rest(h)
else:
h = h + emb_out
h = self.out_layers(h)
return self.skip_connection(x) + h
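# Hedged usage sketch (assumed shapes, not part of the quoted repository): a ResBlock keeps
# the spatial size, maps channels -> out_channels, and is conditioned on the timestep
# embedding through emb_layers.
#
#   import torch
#   block = ResBlock(channels=320, emb_channels=1280, dropout=0.0, out_channels=320)
#   h = block(torch.randn(2, 320, 64, 64), torch.randn(2, 1280))
#   # h.shape -> torch.Size([2, 320, 64, 64])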
# Path: ldm/modules/diffusionmodules/openaimodel.py
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
# Path: ldm/modules/diffusionmodules/openaimodel.py
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
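# Hedged shape sketch (assumed values, not part of the quoted repository): Downsample halves
# the spatial resolution (optionally changing channels) and Upsample doubles it, which is how
# the MFEM below moves between the 320x64x64, 640x32x32 and 1280x16x16 feature levels.
#
#   import torch
#   x = torch.randn(1, 320, 64, 64)
#   down = Downsample(320, use_conv=True, dims=2, out_channels=640)
#   up = Upsample(640, use_conv=True, dims=2, out_channels=320)
#   y = down(x)   # -> [1, 640, 32, 32]
#   z = up(y)     # -> [1, 320, 64, 64]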
# Path: models/transformerBlock.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numbers
from ldm.modules.diffusionmodules.util import (
conv_nd,
linear,
zero_module,
timestep_embedding,
)
from einops import rearrange
from ldm.modules.diffusionmodules.openaimodel import TimestepEmbedSequential, ResBlock, Downsample, Upsample
emb_out = self.adaLN_modulation(emb).chunk(6, dim=1)
shift_msa, scale_msa, gate_msa, shift_ffn, scale_ffn, gate_ffn = reshape(x, emb_out)
x = x + gate_msa * self.attn(modulate(self.norm1(x), shift_msa, scale_msa))
x = x + gate_ffn * self.ffn(modulate(self.norm2(x), shift_ffn, scale_ffn))
return x
##########################################################################
class conv3x3(nn.Module):
def __init__(self, in_c=3, embed_dim=48, bias=False):
super(conv3x3, self).__init__()
self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=3, stride=1, padding=1, bias=bias)
def forward(self, x):
x = self.proj(x)
return x
class MFEM(nn.Module):
def __init__(self,
in_channels=4,
control_channels = 320,
time_embed_dim = 1280,
heads = [1,2,4,8],
conv_resample=True,
dims=2,
ffn_expansion_factor = 2.66,
bias = False,
LayerNorm_type = 'WithBias',
):
super().__init__()
self.control_channels = control_channels
self.dims = dims
self.time_embed = nn.Sequential(
linear(control_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
self.input_hint_block = TimestepEmbedSequential(
conv_nd(dims, 3, 16, 3, padding=1),
nn.SiLU(),
conv_nd(dims, 16, 16, 3, padding=1),
nn.SiLU(),
conv_nd(dims, 16, 32, 3, padding=1, stride=2),
nn.SiLU(),
conv_nd(dims, 32, 32, 3, padding=1),
nn.SiLU(),
conv_nd(dims, 32, 96, 3, padding=1, stride=2),
nn.SiLU(),
conv_nd(dims, 96, 96, 3, padding=1),
nn.SiLU(),
conv_nd(dims, 96, 256, 3, padding=1, stride=2),
nn.SiLU(),
zero_module(conv_nd(dims, 256, control_channels, 3, padding=1))
)
self.input_blocks = TimestepEmbedSequential(conv_nd(dims, in_channels, control_channels, 3, padding=1))
# self.conv3x3 = nn.Conv2d(in_channels, control_channels,3, padding=1) # 4,64,64 ->320,64,64
self.resblock = ResBlock(control_channels, time_embed_dim, dropout=0, out_channels=control_channels)
self.encoder1 = TransformerBlock(dim=control_channels, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type, time_embed_dim=time_embed_dim)
self.down1 = Downsample(control_channels, conv_resample, dims, control_channels*2) ## From 320,64,64 to 640,32,32
self.encoder2 = TransformerBlock(dim=int(control_channels*2), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type, time_embed_dim=time_embed_dim)
self.down2 = Downsample(control_channels*2, conv_resample, dims, control_channels*4) ## From 640,32,32 -> 1280,16,16
self.encoder3 = TransformerBlock(dim=int(control_channels*4), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type, time_embed_dim=time_embed_dim)
self.down3 = Downsample(control_channels*4, conv_resample, dims, control_channels*4) ## From 1280,16,16 -> 1280,8,8
self.mid1 = TransformerBlock(dim=int(control_channels*4), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type, time_embed_dim=time_embed_dim)
self.mid2 = TransformerBlock(dim=int(control_channels*4), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type, time_embed_dim=time_embed_dim)
self.mid3 = TransformerBlock(dim=int(control_channels*4), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type, time_embed_dim=time_embed_dim)
self.up3 = Upsample(control_channels*4, conv_resample, dims, control_channels*4) ## From 1280,8,8 -> 1280,16,16
self.reduce_chan_level3 = nn.Conv2d(int(control_channels*8), int(control_channels*4), kernel_size=1, bias=bias)
self.decoder3 = TransformerBlock(dim=int(control_channels*4), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type, time_embed_dim=time_embed_dim)
self.up2 = Upsample(control_channels*4, conv_resample, dims, control_channels*2) ## From 1280,16,16 -> 640,32,32
self.reduce_chan_level2 = nn.Conv2d(int(control_channels*4), int(control_channels*2), kernel_size=1, bias=bias)
self.decoder2 = TransformerBlock(dim=int(control_channels*2), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type, time_embed_dim=time_embed_dim)
self.up1 = Upsample(control_channels*2, conv_resample, dims, control_channels) ## From 640,32,32 -> 320,64,64
self.reduce_chan_level1 = nn.Conv2d(int(control_channels*2), int(control_channels), kernel_size=1, bias=bias)
self.decoder1 = TransformerBlock(dim=int(control_channels), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type, time_embed_dim=time_embed_dim)
self.zero_convs_module = nn.ModuleList([TimestepEmbedSequential(conv_nd(self.dims, control_channels, control_channels, 1, padding=0)),
TimestepEmbedSequential(conv_nd(self.dims, control_channels*2, control_channels*2, 1, padding=0)),
TimestepEmbedSequential(conv_nd(self.dims, control_channels*4, control_channels*4, 1, padding=0)),
TimestepEmbedSequential(conv_nd(self.dims, control_channels*4, control_channels*4, 1, padding=0)),
TimestepEmbedSequential(conv_nd(self.dims, control_channels*4, control_channels*4, 1, padding=0)),
TimestepEmbedSequential(conv_nd(self.dims, control_channels*4, control_channels*4, 1, padding=0)),
TimestepEmbedSequential(conv_nd(self.dims, control_channels*4, control_channels*4, 1, padding=0)),
TimestepEmbedSequential(conv_nd(self.dims, control_channels*2, control_channels*2, 1, padding=0)),
TimestepEmbedSequential(conv_nd(self.dims, control_channels, control_channels, 1, padding=0))])
def forward(self, x, hint, timesteps, context, **kwargs):
t_emb = timestep_embedding(timesteps, self.control_channels, repeat_only=False)
emb = self.time_embed(t_emb)
hint = self.input_hint_block(hint, emb, context)
x = self.input_blocks(x, emb, context) + hint
# x = self.conv3x3(x)
x = self.resblock(x, emb)
en1 = self.encoder1(x, emb)
dn1 = self.down1(en1)
en2 = self.encoder2(dn1, emb)
dn2 = self.down2(en2)
en3 = self.encoder3(dn2, emb)
dn3 = self.down3(en3)
mid1 = self.mid1(dn3, emb)
mid2 = self.mid2(mid1, emb)
| mid3 = self.mid3(mid2, emb) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: IanYeung/MGLD-VSR
# Path: ldm/modules/diffusionmodules/util.py
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
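# Hedged usage sketch (illustrative only, not part of the quoted repository): with flag=True
# the wrapped function's intermediate activations are recomputed during the backward pass
# instead of being cached, trading extra compute for lower memory.
#
#   import torch
#   lin = torch.nn.Linear(8, 8)
#   x = torch.randn(2, 8, requires_grad=True)
#   y = checkpoint(lambda inp: lin(inp), (x,), lin.parameters(), True)
#   y.sum().backward()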
# Path: ldm/modules/diffusionmodules/util.py
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
# Path: ldm/modules/diffusionmodules/util.py
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
# Path: ldm/modules/diffusionmodules/util.py
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
# Path: ldm/modules/diffusionmodules/util.py
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
# Path: ldm/modules/diffusionmodules/util.py
def normalization(channels, norm_channel=32):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(norm_channel, channels)
# Path: ldm/modules/diffusionmodules/util.py
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
if not repeat_only:
half = dim // 2
freqs = torch.exp(
-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
else:
embedding = repeat(timesteps, 'b -> b d', d=dim)
return embedding
# Path: ldm/modules/diffusionmodules/util.py
class SpatialTemporalConv(nn.Module):
def __init__(self, num_feat, num_frames=1):
super().__init__()
self.num_frames = num_frames
# self.norm = nn.LayerNorm(num_feat)
# self.temporal_conv = conv_nd(3, num_feat, num_feat, (3, 3, 3), padding=(1, 1, 1))
self.temporal_conv = conv_nd(3, num_feat, num_feat, (3, 1, 1), padding=(1, 0, 0))
self.temporal_alpha = nn.Parameter(torch.Tensor(1))
def forward(self, inp, t=None):
bt, c, h, w = inp.shape
b = bt // t if t else bt // self.num_frames
ori = inp
inp = from_4d_to_5d(inp, b, c, t, h, w)
res = self.temporal_conv(inp)
res = from_5d_to_4d(res, b, c, t, h, w)
out = self.temporal_alpha * res + (1 - self.temporal_alpha) * ori
# out = torch.sigmoid(self.temporal_alpha) * res + (1 - torch.sigmoid(self.temporal_alpha)) * ori
return out
# Path: ldm/modules/attention.py
class SpatialTransformer(nn.Module):
"""
Transformer block for image-like data.
First, project the input (aka embedding)
and reshape to b, t, d.
Then apply standard transformer action.
Finally, reshape to image
"""
def __init__(self, in_channels, n_heads, d_head,
depth=1, dropout=0., context_dim=None):
super().__init__()
self.in_channels = in_channels
inner_dim = n_heads * d_head
self.norm = Normalize(in_channels)
self.proj_in = nn.Conv2d(in_channels,
inner_dim,
kernel_size=1,
stride=1,
padding=0)
self.transformer_blocks = nn.ModuleList(
[BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
for d in range(depth)]
)
self.proj_out = zero_module(nn.Conv2d(inner_dim,
in_channels,
kernel_size=1,
stride=1,
padding=0))
def forward(self, x, context=None):
# note: if no context is given, cross-attention defaults to self-attention
b, c, h, w = x.shape
x_in = x
x = self.norm(x)
x = self.proj_in(x)
x = rearrange(x, 'b c h w -> b (h w) c')
for block in self.transformer_blocks:
x = block(x, context=context)
x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
x = self.proj_out(x)
return x + x_in
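# Hedged shape walkthrough (assumed shapes, not part of the quoted repository): the input
# [b, c, h, w] is projected to inner_dim, flattened to [b, h*w, inner_dim] tokens, passed
# through the transformer blocks (cross-attending to `context` when given), reshaped back
# to an image and added to the residual.
#
#   import torch
#   st = SpatialTransformer(in_channels=320, n_heads=8, d_head=40, context_dim=768)
#   x = torch.randn(2, 320, 32, 32)
#   ctx = torch.randn(2, 77, 768)
#   y = st(x, context=ctx)   # -> [2, 320, 32, 32]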
# Path: ldm/modules/attention.py
class SpatialTransformerV2(nn.Module):
"""
Transformer block for image-like data.
First, project the input (aka embedding)
and reshape to b, t, d.
Then apply standard transformer action.
Finally, reshape to image
NEW: use_linear for more efficiency instead of the 1x1 convs
"""
def __init__(self, in_channels, n_heads, d_head,
depth=1, dropout=0., context_dim=None,
disable_self_attn=False, use_linear=False,
use_checkpoint=False):
super().__init__()
if exists(context_dim) and not isinstance(context_dim, list):
context_dim = [context_dim]
self.in_channels = in_channels
inner_dim = n_heads * d_head
self.norm = Normalize(in_channels)
if not use_linear:
self.proj_in = nn.Conv2d(in_channels,
inner_dim,
kernel_size=1,
stride=1,
padding=0)
else:
self.proj_in = nn.Linear(in_channels, inner_dim)
self.transformer_blocks = nn.ModuleList(
[BasicTransformerBlockV2(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)
for d in range(depth)]
)
if not use_linear:
self.proj_out = zero_module(nn.Conv2d(inner_dim,
in_channels,
kernel_size=1,
stride=1,
padding=0))
else:
self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))
self.use_linear = use_linear
def forward(self, x, context=None):
# note: if no context is given, cross-attention defaults to self-attention
if not isinstance(context, list):
context = [context]
b, c, h, w = x.shape
x_in = x
x = self.norm(x)
if not self.use_linear:
x = self.proj_in(x)
x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
if self.use_linear:
x = self.proj_in(x)
for i, block in enumerate(self.transformer_blocks):
x = block(x, context=context[i])
if self.use_linear:
x = self.proj_out(x)
x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
if not self.use_linear:
x = self.proj_out(x)
return x + x_in
# Path: ldm/modules/attention.py
class TemporalAttention(nn.Module):
def __init__(self, num_feat, num_heads=8, dim_head=64, num_frames=1):
super().__init__()
# self.attn = BasicAttention(dim=num_feat, num_heads=num_heads)
self.num_frames = num_frames
self.temporal_attn = MemoryEfficientSelfAttention(num_feat, heads=num_heads, dim_head=dim_head, dropout=0.0)
self.norm = nn.LayerNorm(num_feat)
self.temporal_alpha = nn.Parameter(torch.Tensor(1))
def forward(self, inp, t=None):
bt, c, h, w = inp.shape
b = bt // t if t else bt // self.num_frames
ori = inp
inp = from_4d_to_3d(inp, b, c, t, h, w)
res = self.temporal_attn(self.norm(inp))
res = from_3d_to_4d(res, b, c, t, h, w)
out = self.temporal_alpha * res + (1 - self.temporal_alpha) * ori
return out
# Path: ldm/modules/spade.py
class SPADE(nn.Module):
def __init__(self, norm_nc, label_nc, config_text='spadeinstance3x3'):
super().__init__()
assert config_text.startswith('spade')
parsed = re.search('spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
self.param_free_norm = normalization(norm_nc)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
self.mlp_shared = nn.Sequential(
nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
nn.ReLU()
)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
def forward(self, x_dic, segmap_dic, size=None):
if size is None:
segmap = segmap_dic[str(x_dic.size(-1))]
x = x_dic
else:
x = x_dic[str(size)]
segmap = segmap_dic[str(size)]
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
# segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
actv = self.mlp_shared(segmap)
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
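# Hedged usage sketch (assumed keys and shapes, not part of the quoted repository): the
# semantic map is looked up by spatial size, mapped to per-pixel gamma/beta, and applied
# on top of the parameter-free normalization.
#
#   import torch
#   spade = SPADE(norm_nc=320, label_nc=4)
#   feat = torch.randn(1, 320, 64, 64)
#   segmap_dic = {"64": torch.randn(1, 4, 64, 64)}
#   out = spade(feat, segmap_dic)   # -> [1, 320, 64, 64]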
# Path: basicsr/archs/stylegan2_arch.py
class ConvLayer(nn.Sequential):
"""Conv Layer used in StyleGAN2 Discriminator.
Args:
in_channels (int): Channel number of the input.
out_channels (int): Channel number of the output.
kernel_size (int): Kernel size.
downsample (bool): Whether downsample by a factor of 2.
Default: False.
resample_kernel (list[int]): A list indicating the 1D resample
kernel magnitude. A cross production will be applied to
extent 1D resample kernel to 2D resample kernel.
Default: (1, 3, 3, 1).
bias (bool): Whether with bias. Default: True.
activate (bool): Whether use activateion. Default: True.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
downsample=False,
resample_kernel=(1, 3, 3, 1),
bias=True,
activate=True):
layers = []
# downsample
if downsample:
layers.append(
UpFirDnSmooth(resample_kernel, upsample_factor=1, downsample_factor=2, kernel_size=kernel_size))
stride = 2
self.padding = 0
else:
stride = 1
self.padding = kernel_size // 2
# conv
layers.append(
EqualConv2d(
in_channels, out_channels, kernel_size, stride=stride, padding=self.padding, bias=bias
and not activate))
# activation
if activate:
if bias:
layers.append(FusedLeakyReLU(out_channels))
else:
layers.append(ScaledLeakyReLU(0.2))
super(ConvLayer, self).__init__(*layers)
# Path: basicsr/archs/stylegan2_arch.py
class EqualConv2d(nn.Module):
"""Equalized Linear as StyleGAN2.
Args:
in_channels (int): Channel number of the input.
out_channels (int): Channel number of the output.
kernel_size (int): Size of the convolving kernel.
stride (int): Stride of the convolution. Default: 1
padding (int): Zero-padding added to both sides of the input.
Default: 0.
bias (bool): If ``True``, adds a learnable bias to the output.
Default: ``True``.
bias_init_val (float): Bias initialized value. Default: 0.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, bias_init_val=0):
super(EqualConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.scale = 1 / math.sqrt(in_channels * kernel_size**2)
self.weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size))
if bias:
self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))
else:
self.register_parameter('bias', None)
def forward(self, x):
out = F.conv2d(
x,
self.weight * self.scale,
bias=self.bias,
stride=self.stride,
padding=self.padding,
)
return out
def __repr__(self):
return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
f'out_channels={self.out_channels}, '
f'kernel_size={self.kernel_size},'
f' stride={self.stride}, padding={self.padding}, '
f'bias={self.bias is not None})')
# Path: basicsr/archs/tempo_model_arch.py
class CouplePropModule(nn.Module):
"""Couple Propagation Module.
Args:
num_ch (int): Number of input channels. Default: 4.
num_feat (int): Number of channels. Default: 64.
num_block (int): Number of residual blocks for each branch. Default: 15.
"""
def __init__(self,
num_ch=4,
num_feat=64,
num_block=5):
super().__init__()
self.num_ch = num_ch
self.num_feat = num_feat
# propagation
self.backward_trunk = ConvResidualBlocks(1 * num_feat + num_ch, num_feat, num_block)
self.backward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
self.forward_trunk = ConvResidualBlocks(2 * num_feat + num_ch, num_feat, num_block)
self.forward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
# reconstruction
self.conv_last = nn.Conv2d(num_feat, num_ch, 3, 1, 1)
# activation functions
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, x, flows):
b, n, _, h_input, w_input = x.size()
h, w = x.shape[3:]
# compute flow and keyframe features
flows_forward, flows_backward = flows
# backward branch
out_l = []
feat_prop = x.new_zeros(b, self.num_feat, h, w)
for i in range(n - 1, -1, -1):
x_i = x[:, i, :, :, :]
if i < n - 1:
flow = flows_backward[:, i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([x_i, feat_prop], dim=1)
feat_prop = self.backward_trunk(feat_prop)
out_l.insert(0, feat_prop)
# forward branch
feat_prop = torch.zeros_like(feat_prop)
for i in range(0, n):
x_i = x[:, i, :, :, :]
if i > 0:
flow = flows_forward[:, i - 1, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([x_i, out_l[i], feat_prop], dim=1)
feat_prop = self.forward_trunk(feat_prop)
out = self.conv_last(feat_prop)
out += x_i
out_l[i] = out
return torch.stack(out_l, dim=1)
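# Hedged shape sketch (assumed shapes, not part of the quoted repository): latent frames are
# propagated backward then forward along the time axis, warped with the precomputed optical
# flows, and refined residually.
#
#   import torch
#   prop = CouplePropModule(num_ch=4, num_feat=64, num_block=5)
#   x = torch.randn(1, 5, 4, 64, 64)            # (b, n, c, h, w)
#   flows_fwd = torch.randn(1, 4, 2, 64, 64)    # n - 1 forward flow fields
#   flows_bwd = torch.randn(1, 4, 2, 64, 64)    # n - 1 backward flow fields
#   out = prop(x, (flows_fwd, flows_bwd))       # -> [1, 5, 4, 64, 64]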
# Path: basicsr/archs/tempo_model_arch.py
class CouplePropModuleWithFlowNet(nn.Module):
"""Couple Propagation Module.
Args:
num_ch (int): Number of input channels. Default: 4.
num_feat (int): Number of channels. Default: 64.
num_block (int): Number of residual blocks for each branch. Default: 5.
spynet_path (str): Path to the pretrained weights of SPyNet. Default: None.
"""
def __init__(self,
num_ch=4,
num_feat=64,
num_block=5,
spynet_path=None):
super().__init__()
self.num_ch = num_ch
self.num_feat = num_feat
# alignment
self.spynet = SpyNet(spynet_path)
# propagation
self.backward_trunk = ConvResidualBlocks(1 * num_feat + num_ch, num_feat, num_block)
self.backward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
self.forward_trunk = ConvResidualBlocks(2 * num_feat + num_ch, num_feat, num_block)
self.forward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
# reconstruction
self.conv_last = nn.Conv2d(num_feat, num_ch, 3, 1, 1)
# activation functions
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def get_flow(self, x):
b, n, c, h, w = x.size()
x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)
x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)
flows_backward = self.spynet(x_1, x_2).view(b, n - 1, 2, h, w)
flows_forward = self.spynet(x_2, x_1).view(b, n - 1, 2, h, w)
return flows_forward, flows_backward
def forward(self, x, lrs):
b, n, _, h_input, w_input = x.size()
h, w = x.shape[3:]
# compute flow
flows_forward, flows_backward = self.get_flow(lrs)
# backward branch
out_l = []
feat_prop = x.new_zeros(b, self.num_feat, h, w)
for i in range(n - 1, -1, -1):
x_i = x[:, i, :, :, :]
if i < n - 1:
flow = flows_backward[:, i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([x_i, feat_prop], dim=1)
feat_prop = self.backward_trunk(feat_prop)
out_l.insert(0, feat_prop)
# forward branch
feat_prop = torch.zeros_like(feat_prop)
for i in range(0, n):
x_i = x[:, i, :, :, :]
if i > 0:
flow = flows_forward[:, i - 1, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([x_i, out_l[i], feat_prop], dim=1)
feat_prop = self.forward_trunk(feat_prop)
out = self.conv_last(feat_prop)
out += x_i
out_l[i] = out
return torch.stack(out_l, dim=1)
# Path: ldm/modules/diffusionmodules/openaimodel.py
from abc import abstractmethod
from functools import partial
from typing import Iterable
from einops import rearrange
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
SpatialTemporalConv,
)
from ldm.modules.attention import SpatialTransformer, SpatialTransformerV2, TemporalAttention
from ldm.modules.spade import SPADE
from basicsr.archs.stylegan2_arch import ConvLayer, EqualConv2d
from basicsr.archs.tempo_model_arch import CouplePropModule, CouplePropModuleWithFlowNet
from omegaconf.listconfig import ListConfig
from omegaconf.listconfig import ListConfig
from omegaconf.listconfig import ListConfig
from omegaconf.listconfig import ListConfig
import math
import torch
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import xformers
import xformers.ops
try:
XFORMERS_IS_AVAILBLE = True
except:
XFORMERS_IS_AVAILBLE = False
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
def exists(val):
return val is not None
def cal_fea_cossim(fea_1, fea_2, save_dir=None):
cossim_fuc = nn.CosineSimilarity(dim=-1, eps=1e-6)
if save_dir is None:
save_dir_1 = './cos_sim64_1_not.txt'
save_dir_2 = './cos_sim64_2_not.txt'
b, c, h, w = fea_1.size()
fea_1 = fea_1.reshape(b, c, h*w)
fea_2 = fea_2.reshape(b, c, h*w)
| cos_sim = cossim_fuc(fea_1, fea_2) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Institute4FutureHealth/CHA
# Path: datapipes/datapipe_types.py
class DatapipeType(str, Enum):
MEMORY = "memory"
# Path: interface/base.py
class Interface(BaseModel):
gr: Any = None
interface: Any = None
@model_validator(mode="before")
def validate_environment(cls, values: Dict) -> Dict:
"""
Validate that api key and python package exists in environment.
This function checks if the `gradio` Python package is installed in the environment. If the package is not found, it raises a `ValueError`
with an appropriate error message.
Args:
cls (object): The class to which this method belongs.
values (Dict): A dictionary containing the environment values.
Return:
Dict: The updated `values` dictionary with the `gradio` package imported.
Raise:
ValueError: If the `gradio` package is not found in the environment.
Example:
.. code-block:: python
from langchain import ReActChain, OpenAI
react = ReAct(llm=OpenAI())
"""
try:
import gradio as gr
values["gr"] = gr
except ImportError:
raise ValueError(
"Could not import gradio python package. "
"Please install it with `pip install gradio`."
)
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def prepare_interface(
self,
respond,
reset,
upload_meta,
available_tasks,
share=False,
):
"""
Prepare the Gradio interface for the chatbot.
This method sets up the Gradio interface for the chatbot.
It creates various UI components such as a textbox for user input, a checkbox for enabling/disabling chat history,
a dropdown for selecting tasks, and a clear button to reset the interface. The interface is then launched and stored
in the `self.interface` attribute.
Args:
self (object): The instance of the class.
respond (function): The function to handle user input and generate responses.
reset (function): The function to reset the chatbot state.
upload_meta (Any): meta data.
available_tasks (list, optional): A list of available tasks. Defaults to an empty list.
share (bool, optional): Flag indicating whether to enable sharing the interface. Defaults to False.
Return:
None
Example:
.. code-block:: python
from langchain import ReActChain, OpenAI
react = ReAct(llm=OpenAI())
"""
with self.gr.Blocks() as demo:
chatbot = self.gr.Chatbot(bubble_full_width=False)
with self.gr.Row():
msg = self.gr.Textbox(
scale=9,
label="Question",
info="Put your query here and press enter.",
)
# btn = self.gr.UploadButton(
# "📁",
# scale=1,
# file_types=["image", "video", "audio", "text"],
# )
check_box = self.gr.Checkbox(
scale=1,
value=True,
label="Use History",
info="If checked, the chat history will be sent over along with the next query.",
)
with self.gr.Row():
tasks = self.gr.Dropdown(
value=[],
choices=available_tasks,
multiselect=True,
label="Tasks List",
info="The list of available tasks. Select the ones that you want to use.",
)
clear = self.gr.ClearButton([msg, chatbot])
clear.click(reset)
msg.submit(
respond,
[msg, chatbot, check_box, tasks],
[msg, chatbot],
)
# btn.upload(
# upload_meta, [chatbot, btn], [chatbot], queue=False
# )
demo.launch(share=share)
self.interface = demo
def close(self):
"""
Close the Gradio interface.
This method closes the Gradio interface associated with the chatbot.
It calls the `close` method of the interface object stored in the `self.interface` attribute.
Args:
self (object): The instance of the class.
Return:
None
Example:
.. code-block:: python
from langchain import ReActChain, OpenAI
react = ReAct(llm=OpenAI())
"""
self.interface.close()
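# Hedged usage sketch (assumed stub callbacks, not part of the quoted repository): the
# Interface only wires Gradio widgets to the callables it receives; respond must accept
# (message, chat_history, use_history, tasks) and return the cleared textbox plus history.
#
#   def respond(message, chat_history, use_history, tasks):
#       chat_history.append((message, "stub answer"))
#       return "", chat_history
#
#   def reset():
#       pass
#
#   ui = Interface()
#   ui.prepare_interface(respond, reset, upload_meta=None, available_tasks=["serpapi"])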
# Path: llms/llm_types.py
class LLMType(str, Enum):
OPENAI = "openai"
ANTHROPIC = "anthropic"
# Path: orchestrator/orchestrator.py
class Orchestrator(BaseModel):
"""
**Description:**
The Orchestrator class is the main execution heart of the CHA. All the components of the Orchestrator are initialized and executed here.
The Orchestrator will start a new answering cycle by calling the `run` method. From there, the planning is started,
then tasks will be executed one by one till the **Task Planner** decides that no more information is needed.
Finally the **Task Planner** final answer will be routed to the **Final Response Generator** to generate an empathic final
response that is returned to the user.
"""
planner: BasePlanner = None
datapipe: DataPipe = None
promptist: Any = None
response_generator: BaseResponseGenerator = None
available_tasks: Dict[str, BaseTask] = {}
max_retries: int = 16
max_task_execute_retries: int = 3
max_planner_execute_retries: int = 16
max_final_answer_execute_retries: int = 3
role: int = 0
verbose: bool = False
planner_logger: Optional[logging.Logger] = None
tasks_logger: Optional[logging.Logger] = None
orchestrator_logger: Optional[logging.Logger] = None
final_answer_generator_logger: Optional[logging.Logger] = None
promptist_logger: Optional[logging.Logger] = None
error_logger: Optional[logging.Logger] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def print_log(self, log_name: str, message: str):
if self.verbose:
if log_name == "planner":
self.planner_logger.debug(message)
if log_name == "task":
self.tasks_logger.debug(message)
if log_name == "orchestrator":
self.orchestrator_logger.debug(message)
if log_name == "response_generator":
self.final_answer_generator_logger.debug(message)
if log_name == "promptist":
self.promptist_logger.debug(message)
if log_name == "error":
self.error_logger.debug(message)
@classmethod
def initialize(
self,
planner_llm: str = LLMType.OPENAI,
planner_name: str = PlannerType.ZERO_SHOT_REACT_PLANNER,
datapipe_name: str = DatapipeType.MEMORY,
promptist_name: str = "",
response_generator_llm: str = LLMType.OPENAI,
response_generator_name: str = ResponseGeneratorType.BASE_GENERATOR,
available_tasks: Optional[List[str]] = None,
verbose: bool = False,
**kwargs,
) -> Orchestrator:
"""
This class method initializes the Orchestrator by setting up the planner, datapipe, promptist, response generator,
and available tasks.
Args:
planner_llm (str): LLMType to be used as LLM for planner.
planner_name (str): PlannerType to be used as task planner.
datapipe_name (str): DatapipeType to be used as data pipe.
promptist_name (str): Not implemented yet!
response_generator_llm (str): LLMType to be used as LLM for response generator.
response_generator_name (str): ResponseGeneratorType to be used as response generator.
available_tasks (List[str]): List of available task using TaskType.
verbose (bool): Specifies if the debugging logs be printed or not.
**kwargs (Any): Additional keyword arguments.
Return:
Orchestrator: Initialized Orchestrator instance.
Example:
.. code-block:: python
from datapipes.datapipe_types import DatapipeType
from planners.planner_types import PlannerType
from response_generators.response_generator_types import ResponseGeneratorType
from tasks.task_types import TaskType
from llms.llm_types import LLMType
from orchestrator.orchestrator import Orchestrator
#If you want to use playwright task
from tasks.playwright.utils import create_sync_playwright_browser
sync_browser = create_sync_playwright_browser()
#
orchestrator = Orchestrator.initialize(
planner_llm=LLMType.OPENAI,
planner_name=PlannerType.ZERO_SHOT_REACT_PLANNER,
datapipe_name=DatapipeType.MEMORY,
promptist_name="",
response_generator_llm=LLMType.OPENAI,
response_generator_name=ResponseGeneratorType.BASE_GENERATOR,
available_tasks=[TaskType.SERPAPI, TaskType.EXTRACT_TEXT],
sync_browser=sync_browser,
verbose=self.verbose,
**kwargs
)
"""
if available_tasks is None:
available_tasks = []
planner_logger = (
tasks_logger
) = (
orchestrator_logger
) = (
final_answer_generator_logger
) = promptist_logger = error_logger = None
if verbose:
planner_logger = CustomDebugFormatter.create_logger(
"Planner", "cyan"
)
tasks_logger = CustomDebugFormatter.create_logger(
"Task", "purple"
)
orchestrator_logger = CustomDebugFormatter.create_logger(
"Orchestrator", "green"
)
final_answer_generator_logger = (
CustomDebugFormatter.create_logger(
"Response Generator", "blue"
)
)
promptist_logger = CustomDebugFormatter.create_logger(
"Promptist", "blue"
)
error_logger = CustomDebugFormatter.create_logger(
"Error", "red"
)
datapipe = initialize_datapipe(
datapipe=datapipe_name, **kwargs
)
if verbose:
orchestrator_logger.debug(
f"Datapipe {datapipe_name} is successfully initialized.\n"
)
tasks = {}
for task in available_tasks:
kwargs["datapipe"] = datapipe
tasks[task] = initialize_task(task=task, **kwargs)
if verbose:
orchestrator_logger.debug(
f"Task '{task}' is successfully initialized."
)
planner = initialize_planner(
tasks=list(tasks.values()),
llm=planner_llm,
planner=planner_name,
**kwargs,
)
if verbose:
orchestrator_logger.debug(
f"Planner {planner_name} is successfully initialized."
)
response_generator = initialize_response_generator(
response_generator=response_generator_name,
llm=response_generator_llm,
**kwargs,
)
if verbose:
orchestrator_logger.debug(
f"Response Generator {response_generator_name} is successfully initialized."
)
return self(
planner=planner,
datapipe=datapipe,
promptist=None,
response_generator=response_generator,
available_tasks=tasks,
verbose=verbose,
planner_logger=planner_logger,
tasks_logger=tasks_logger,
orchestrator_logger=orchestrator_logger,
final_answer_generator_logger=final_answer_generator_logger,
promptist_logger=promptist_logger,
error_logger=error_logger,
)
def process_meta(self) -> bool:
"""
This method processes the meta information and returns a boolean value. Currently, it always returns False.
Return:
bool: False
"""
return False
def execute_task(self, action) -> str:
"""
Execute the specified task based on the planner's selected **Action**. This method executes a specific task based on the provided action.
It takes an action as input and retrieves the corresponding task from the available tasks dictionary.
It then executes the task with the given task input. If the task has an output_type, it stores the result in the datapipe and returns
a message indicating the storage key. Otherwise, it returns the result directly.
Args:
action (Action): Action to be executed.
Return:
str: Result of the task execution.
bool: If the task result should be directly returned to the user and stop planning.
"""
retries = 0
self.print_log(
"task",
f"---------------\nExecuting task:\nTask Name: {action.task}\nTask Inputs: {action.task_input}\n",
)
task_input = action.task_input
error_message = ""
while retries < self.max_task_execute_retries:
try:
task = self.available_tasks[action.task]
result = task.execute(task_input)
self.print_log(
"task",
f"Task is executed successfully\nResult: {result}\n---------------\n",
)
return result, task.return_direct
except Exception as e:
self.print_log(
"error",
f"Error running task: \n{e}\n---------------\n",
)
logging.exception(e)
error_message = e
retries += 1
return (
f"Error executing task {action.task}: {error_message}",
False,
)
def planner_generate_prompt(self, query) -> str:
"""
Generate a prompt from the query to make it more understandable for both planner and response generator.
Not implemented yet.
Args:
query (str): Input query.
Return:
str: Generated prompt.
"""
return query
def _retrieve_last_action_from_datapip(self, previous_actions):
print("previous action check", previous_actions)
if len(previous_actions) > 0:
for i in range(len(previous_actions) - 1, -1, -1):
if previous_actions[i].task in [
TaskType.READ_FROM_DATAPIPE
]:
return None
match = re.search(
r"\$(datapipe:[^\$]+)\$",
previous_actions[i].task_response,
)
print(
"previous action check",
previous_actions[i],
match,
)
if match:
action = Action(
TaskType.READ_FROM_DATAPIPE,
match.group(1),
"",
"",
)
                    action.task_response, _ = self.execute_task(
action
)
return action
return None
def response_generator_generate_prompt(
self,
final_response: str = "",
history: str = "",
meta: List[str] = None,
previous_actions: List[Action] = None,
use_history: bool = False,
) -> str:
if meta is None:
meta = []
if previous_actions is None:
previous_actions = []
prompt = (
"MetaData: {meta}\n\n"
"History: \n{history}\n\n"
"Plan: \n{plan}\n\n"
)
if use_history:
prompt = prompt.replace("{history}", history)
prompt = prompt.replace("{meta}", ", ".join(meta)).replace(
"{plan}",
"".join(
[
f"{self.available_tasks[action.task].chat_name}: {action.task_response}\n"
if action.task in self.available_tasks
else ""
for action in previous_actions
]
),
) # + f"\n{final_response}")
return prompt
def plan(
self, query, history, meta, previous_actions, use_history
) -> List[Union[Action, PlanFinish]]:
"""
Plan actions based on the query, history, and previous actions using the selected planner type.
This method generates a plan of actions based on the provided query, history, previous actions, and use_history flag.
It calls the plan method of the planner and returns a list of actions or plan finishes.
Args:
query (str): Input query.
history (str): History information.
meta (Any): meta information.
previous_actions (List[Action]): List of previous actions.
use_history (bool): Flag indicating whether to use history.
Return:
List[Union[Action, PlanFinish]]: List of planned actions.
Example:
.. code-block:: python
from langchain import ReActChain, OpenAI
react = ReAct(llm=OpenAI())
"""
return self.planner.plan(
query, history, meta, previous_actions, use_history
)
def generate_final_answer(self, query, thinker) -> str:
"""
Generate the final answer using the response generator.
This method generates the final answer based on the provided query and thinker.
It calls the generate method of the response generator and returns the generated answer.
Args:
query (str): Input query.
thinker (str): Thinking component.
Return:
str: Final generated answer.
"""
retries = 0
while retries < self.max_final_answer_execute_retries:
try:
return self.response_generator.generate(
query=query, thinker=thinker
)
except Exception as e:
print(e)
retries += 1
return "We currently have problem processing your question. Please try again after a while."
def run(
self,
query: str,
meta: List[str] = None,
history: str = "",
use_history: bool = False,
**kwargs: Any,
) -> str:
"""
This method runs the orchestrator by taking a query, meta information, history, and other optional keyword arguments as input.
It initializes variables for tracking the execution, generates a prompt based on the query, and sets up a loop for executing actions.
Within the loop, it plans actions, executes tasks, and updates the previous actions list.
If a PlanFinish action is encountered, the loop breaks, and the final response is set.
If any errors occur during execution, the loop retries a limited number of times before setting a final error response.
Finally, it generates the final response using the prompt and thinker, and returns the final response along with the previous actions.
Args:
query (str): Input query.
meta (List[str]): Meta information.
history (str): History information.
use_history (bool): Flag indicating whether to use history.
**kwargs (Any): Additional keyword arguments.
Return:
            Tuple[str, List[Action]]: Final response and list of previous actions.
"""
if meta is None:
meta = []
i = 0
previous_actions = []
meta_infos = ""
for meta_data in meta:
key = self.datapipe.store(meta_data)
meta_infos += (
f"The file with the name ${meta_data.split('/')[-1]}$ is stored with the key $datapipe:{key}$."
"Pass this key to the tools when you want to send them over to the tool\n"
)
prompt = self.planner_generate_prompt(query)
if "google_translate" in self.available_tasks:
prompt = self.available_tasks["google_translate"].execute(
prompt + "$#en"
)
source_language = prompt[1]
prompt = prompt[0]
# history = self.available_tasks["google_translate"].execute(history+"$#en").text
final_response = ""
finished = False
        self.print_log("planner", "Planning Started...\n")
while True:
try:
                self.print_log(
                    "planner",
                    f"Continuing Planning... Try number {i}\n\n",
                )
actions = self.plan(
query=prompt,
history=history,
meta=meta_infos,
previous_actions=previous_actions,
use_history=use_history,
)
for action in actions:
if isinstance(action, PlanFinish):
final_response = action.response
finished = True
break
else:
                        return_direct = False
if "Exception" not in action.task:
(
action.task_response,
return_direct,
) = self.execute_task(action)
i = 0
previous_actions.append(action)
if return_direct:
print("inside return direct")
final_response = action.task_response
finished = True
if finished:
action = self._retrieve_last_action_from_datapip(
previous_actions
)
if action is not None:
previous_actions.append(action)
break
except ValueError as error:
                self.print_log(
                    "error", f"Planning Error:\n{error}\n\n"
                )
i += 1
if i > self.max_retries:
final_response = "Problem preparing the answer. Please try again."
break
previous_actions.append(
Action(
"Exception",
"Invalid or incomplete response",
"".join(error.args),
"",
)
)
        self.print_log(
            "planner",
            f"Planner final response: {final_response}\nPlanning Ended...\n\n",
        )
final_response = self.response_generator_generate_prompt(
final_response=final_response,
history=history,
meta=meta_infos,
previous_actions=previous_actions,
use_history=use_history,
)
self.print_log(
"response_generator",
f"Final Answer Generation Started...\n\nInput Prompt: \n{final_response}",
)
final_response = self.generate_final_answer(
query=query, thinker=final_response
)
self.print_log(
"response_generator",
f"Response: {final_response}\n\nFinal Answer Generation Ended.\n",
)
if "google_translate" in self.available_tasks:
final_response = self.available_tasks[
"google_translate"
].execute(f"{final_response}$#{source_language}")[0]
return final_response, previous_actions
# Path: planners/action.py
class Action:
task: str
task_input: str
task_response: str
log: str
# Path: planners/planner_types.py
class PlannerType(str, Enum):
ZERO_SHOT_REACT_PLANNER = "zero_shot_react_planner"
# Path: response_generators/response_generator_types.py
class ResponseGeneratorType(str, Enum):
BASE_GENERATOR = "base-generator"
# Path: tasks/playwright/utils.py
def create_sync_playwright_browser(
headless: bool = True,
) -> SyncBrowser:
"""
This function creates and launches a Playwright synchronous browser.
Args:
headless (bool, optional): Whether to launch the browser in headless mode. Default is True.
Return:
SyncBrowser: The created Playwright synchronous browser instance.
"""
from playwright.sync_api import sync_playwright
browser = sync_playwright().start()
return browser.chromium.launch(headless=headless)
# Path: tasks/task_types.py
class TaskType(str, Enum):
SERPAPI = "serpapi"
CLICK = "click"
GET_CURRENT_PAGE = "current_page"
EXTRACT_HYPERLINKS = "extract_hyperlinks"
EXTRACT_TEXT = "extract_text"
GET_ELEMENTS = "get_elements"
NAVIGATE_BACK = "navigate_back"
NAVIGATE = "navigate"
AFFECT_SLEEP_GET = "affect_sleep_get"
AFFECT_ACTIVITY_GET = "affect_activity_get"
AFFECT_SLEEP_ANALYSIS = "affect_sleep_analysis"
AFFECT_ACTIVITY_ANALYSIS = "affect_activity_analysis"
GOOGLE_TRANSLATE = "google_translate"
ASK_USER = "ask_user"
READ_FROM_DATAPIPE = "read_from_datapipe"
TEST_FILE = "test_file"
# Path: tasks/types.py
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = {
TaskType.SERPAPI: SerpAPI,
TaskType.CLICK: Click,
TaskType.GET_CURRENT_PAGE: CurrentWebPage,
TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks,
TaskType.EXTRACT_TEXT: ExtractText,
TaskType.GET_ELEMENTS: GetElements,
TaskType.NAVIGATE_BACK: NavigateBack,
TaskType.NAVIGATE: Navigate,
TaskType.AFFECT_SLEEP_GET: SleepGet,
TaskType.AFFECT_ACTIVITY_GET: ActivityGet,
TaskType.AFFECT_SLEEP_ANALYSIS: SleepAnalysis,
TaskType.AFFECT_ACTIVITY_ANALYSIS: ActivityAnalysis,
TaskType.GOOGLE_TRANSLATE: GoogleTranslate,
TaskType.ASK_USER: AskUser,
TaskType.TEST_FILE: TestFile,
TaskType.READ_FROM_DATAPIPE: ReadDataPipe,
}
# Path: CHA.py
from typing import Any
from typing import List
from typing import Tuple
from pydantic import BaseModel
from datapipes.datapipe_types import DatapipeType
from interface.base import Interface
from llms.llm_types import LLMType
from orchestrator.orchestrator import Orchestrator
from planners.action import Action
from planners.planner_types import PlannerType
from response_generators.response_generator_types import (
ResponseGeneratorType,
)
from tasks.playwright.utils import create_sync_playwright_browser
from tasks.task_types import TaskType
from tasks.types import TASK_TO_CLASS
class CHA(BaseModel):
name: str = "CHA"
previous_actions: List[Action] = []
orchestrator: Orchestrator = None
sync_browser: Any = None
planner_llm: str = LLMType.OPENAI
planner: str = PlannerType.ZERO_SHOT_REACT_PLANNER
datapipe: str = DatapipeType.MEMORY
promptist: str = ""
response_generator_llm: str = LLMType.OPENAI
response_generator: str = ResponseGeneratorType.BASE_GENERATOR
meta: List[str] = []
verbose: bool = False
def _generate_history(
self, chat_history: List[Tuple[str, str]] = None
) -> str:
if chat_history is None:
chat_history = []
print("chat history", chat_history)
history = "".join(
[
f"\n------------\nUser: {chat[0]}\nCHA: {chat[1]}\n------------\n"
for chat in chat_history
]
)
if len(self.previous_actions) > 0:
history += "Previous Actions: " + "".join(
[
f"\n------------\naction: {action.task}\naction_response: {action.task_response}\n------------\n"
for action in self.previous_actions
if action.task != "Exception"
]
)
return history
def _run(
self,
query: str,
chat_history: List[Tuple[str, str]] = None,
tasks_list: List[str] = None,
use_history: bool = False,
| **kwargs, |
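A minimal, self-contained sketch (not taken from any of the repositories above) of the pattern the predicted line completes: _run simply forwards any extra keyword arguments downstream, mirroring how Orchestrator.initialize and initialize_task pass **kwargs through in the quoted code. The function and argument names below are made up for illustration.

def _forwarding_sketch(query, chat_history=None, tasks_list=None, use_history=False, **kwargs):
    # The trailing **kwargs in the signature lets callers pass arbitrary options
    # (e.g. verbose=True) that are handed through unchanged to whatever runs next.
    return {"query": query, "use_history": use_history, "forwarded": kwargs}

print(_forwarding_sketch("How did I sleep last week?", verbose=True))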
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Czm369/MixPL
# Path: mmdet/models/layers/bbox_nms.py
def multiclass_nms(
multi_bboxes: Tensor,
multi_scores: Tensor,
score_thr: float,
nms_cfg: ConfigType,
max_num: int = -1,
score_factors: Optional[Tensor] = None,
return_inds: bool = False,
box_dim: int = 4
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:
"""NMS for multi-class bboxes.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
multi_scores (Tensor): shape (n, #class), where the last column
contains scores of the background class, but this will be ignored.
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
nms_cfg (Union[:obj:`ConfigDict`, dict]): a dict that contains
the arguments of nms operations.
max_num (int, optional): if there are more than max_num bboxes after
NMS, only top max_num will be kept. Default to -1.
score_factors (Tensor, optional): The factors multiplied to scores
before applying NMS. Default to None.
return_inds (bool, optional): Whether return the indices of kept
bboxes. Default to False.
box_dim (int): The dimension of boxes. Defaults to 4.
Returns:
Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:
(dets, labels, indices (optional)), tensors of shape (k, 5),
(k), and (k). Dets are boxes with scores. Labels are 0-based.
"""
num_classes = multi_scores.size(1) - 1
# exclude background category
if multi_bboxes.shape[1] > box_dim:
bboxes = multi_bboxes.view(multi_scores.size(0), -1, box_dim)
else:
bboxes = multi_bboxes[:, None].expand(
multi_scores.size(0), num_classes, box_dim)
scores = multi_scores[:, :-1]
labels = torch.arange(num_classes, dtype=torch.long, device=scores.device)
labels = labels.view(1, -1).expand_as(scores)
bboxes = bboxes.reshape(-1, box_dim)
scores = scores.reshape(-1)
labels = labels.reshape(-1)
if not torch.onnx.is_in_onnx_export():
# NonZero not supported in TensorRT
# remove low scoring boxes
valid_mask = scores > score_thr
# multiply score_factor after threshold to preserve more bboxes, improve
# mAP by 1% for YOLOv3
if score_factors is not None:
# expand the shape to match original shape of score
score_factors = score_factors.view(-1, 1).expand(
multi_scores.size(0), num_classes)
score_factors = score_factors.reshape(-1)
scores = scores * score_factors
if not torch.onnx.is_in_onnx_export():
# NonZero not supported in TensorRT
inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]
else:
# TensorRT NMS plugin has invalid output filled with -1
# add dummy data to make detection output correct.
bboxes = torch.cat([bboxes, bboxes.new_zeros(1, box_dim)], dim=0)
scores = torch.cat([scores, scores.new_zeros(1)], dim=0)
labels = torch.cat([labels, labels.new_zeros(1)], dim=0)
if bboxes.numel() == 0:
if torch.onnx.is_in_onnx_export():
raise RuntimeError('[ONNX Error] Can not record NMS '
'as it has not been executed this time')
dets = torch.cat([bboxes, scores[:, None]], -1)
if return_inds:
return dets, labels, inds
else:
return dets, labels
dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)
if max_num > 0:
dets = dets[:max_num]
keep = keep[:max_num]
if return_inds:
return dets, labels[keep], inds[keep]
else:
return dets, labels[keep]
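# Illustrative usage sketch, not part of bbox_nms.py: assuming mmdet and mmcv are
# installed, multiclass_nms can be exercised on toy predictions via the same import
# path the current file uses. All tensor values and the nms_cfg are made-up examples.
import torch
from mmdet.models.layers import multiclass_nms
toy_bboxes = torch.tensor([[10., 10., 50., 50.], [12., 12., 52., 52.]])  # (n, 4)
toy_scores = torch.tensor([[0.9, 0.1], [0.8, 0.2]])  # (n, #class + 1); last column is background
toy_dets, toy_labels = multiclass_nms(toy_bboxes, toy_scores, score_thr=0.3,
                                      nms_cfg=dict(type='nms', iou_threshold=0.5))
# toy_dets has shape (k, 5): kept boxes with their scores; toy_labels are 0-based class indices.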
# Path: mmdet/models/losses/accuracy.py
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class)
target (torch.Tensor): The target of each prediction, shape (N, )
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
this threshold are considered incorrect. Default to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == 2 and target.ndim == 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t() # transpose to shape (maxk, N)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
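# Illustrative usage sketch, not part of accuracy.py: with the toy logits below,
# top-1 accuracy is 100% because both rows are classified correctly. Assumes mmdet
# is installed; the values are made up.
import torch
from mmdet.models.losses import accuracy
toy_pred = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # (N, num_class) scores
toy_target = torch.tensor([1, 0])                  # (N,) ground-truth labels
print(accuracy(toy_pred, toy_target, topk=1))      # tensor([100.])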
# Path: mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
class Shared2FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels: int = 1024, *args, **kwargs) -> None:
super().__init__(
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
# Path: mmdet/models/utils/misc.py
def empty_instances(batch_img_metas: List[dict],
device: torch.device,
task_type: str,
instance_results: OptInstanceList = None,
mask_thr_binary: Union[int, float] = 0,
box_type: Union[str, type] = 'hbox',
use_box_type: bool = False,
num_classes: int = 80,
score_per_cls: bool = False) -> List[InstanceData]:
"""Handle predicted instances when RoI is empty.
Note: If ``instance_results`` is not None, it will be modified
in place internally, and then return ``instance_results``
Args:
batch_img_metas (list[dict]): List of image information.
device (torch.device): Device of tensor.
task_type (str): Expected returned task type. it currently
supports bbox and mask.
instance_results (list[:obj:`InstanceData`]): List of instance
results.
mask_thr_binary (int, float): mask binarization threshold.
Defaults to 0.
box_type (str or type): The empty box type. Defaults to `hbox`.
use_box_type (bool): Whether to warp boxes with the box type.
Defaults to False.
num_classes (int): num_classes of bbox_head. Defaults to 80.
score_per_cls (bool): Whether to generate classwise score for
the empty instance. ``score_per_cls`` will be True when the model
needs to produce raw results without nms. Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
"""
assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \
f' but got {task_type}'
if instance_results is not None:
assert len(instance_results) == len(batch_img_metas)
results_list = []
for img_id in range(len(batch_img_metas)):
if instance_results is not None:
results = instance_results[img_id]
assert isinstance(results, InstanceData)
else:
results = InstanceData()
if task_type == 'bbox':
_, box_type = get_box_type(box_type)
bboxes = torch.zeros(0, box_type.box_dim, device=device)
if use_box_type:
bboxes = box_type(bboxes, clone=False)
results.bboxes = bboxes
score_shape = (0, num_classes + 1) if score_per_cls else (0, )
results.scores = torch.zeros(score_shape, device=device)
results.labels = torch.zeros((0, ),
device=device,
dtype=torch.long)
else:
# TODO: Handle the case where rescale is false
img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2]
# the type of `im_mask` will be torch.bool or torch.uint8,
# where uint8 if for visualization and debugging.
im_mask = torch.zeros(
0,
img_h,
img_w,
device=device,
dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8)
results.masks = im_mask
results_list.append(results)
return results_list
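# Illustrative usage sketch, not part of misc.py: empty_instances builds zero-length
# placeholder results when no RoIs survive. Assumes mmdet and mmengine are installed;
# the image meta below is a made-up example.
import torch
from mmdet.models.utils import empty_instances
toy_placeholder = empty_instances(
    batch_img_metas=[dict(ori_shape=(480, 640))],
    device=torch.device('cpu'),
    task_type='bbox',
    num_classes=80,
)
# toy_placeholder[0].bboxes has shape (0, 4) and toy_placeholder[0].labels has shape (0,).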
# Path: mmdet/registry.py
MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])
# Path: mmdet/structures/bbox/transforms.py
def get_box_tensor(boxes: Union[Tensor, BaseBoxes]) -> Tensor:
"""Get tensor data from box type boxes.
Args:
boxes (Tensor or BaseBoxes): boxes with type of tensor or box type.
If its type is a tensor, the boxes will be directly returned.
If its type is a box type, the `boxes.tensor` will be returned.
Returns:
Tensor: boxes tensor.
"""
if isinstance(boxes, BaseBoxes):
boxes = boxes.tensor
return boxes
# Path: mmdet/structures/bbox/transforms.py
def scale_boxes(boxes: Union[Tensor, BaseBoxes],
scale_factor: Tuple[float, float]) -> Union[Tensor, BaseBoxes]:
"""Scale boxes with type of tensor or box type.
Args:
boxes (Tensor or :obj:`BaseBoxes`): boxes need to be scaled. Its type
can be a tensor or a box type.
scale_factor (Tuple[float, float]): factors for scaling boxes.
The length should be 2.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Scaled boxes.
"""
if isinstance(boxes, BaseBoxes):
boxes.rescale_(scale_factor)
return boxes
else:
# Tensor boxes will be treated as horizontal boxes
repeat_num = int(boxes.size(-1) / 2)
scale_factor = boxes.new_tensor(scale_factor).repeat((1, repeat_num))
return boxes * scale_factor
# Path: mmdet/utils/typing_utils.py
# Path: projects/Detic_new/detic/detic_bbox_head.py
import json
import torch
from typing import List, Optional
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from torch.nn import functional as F
from mmdet.models.layers import multiclass_nms
from mmdet.models.losses import accuracy
from mmdet.models.roi_heads.bbox_heads import Shared2FCBBoxHead
from mmdet.models.utils import empty_instances
from mmdet.registry import MODELS
from mmdet.structures.bbox import get_box_tensor, scale_boxes
from mmdet.utils import ConfigType, InstanceList
# Copyright (c) OpenMMLab. All rights reserved.
def load_class_freq(path='datasets/metadata/lvis_v1_train_cat_info.json',
freq_weight=0.5):
cat_info = json.load(open(path, 'r'))
cat_info = torch.tensor(
[c['image_count'] for c in sorted(cat_info, key=lambda x: x['id'])])
freq_weight = cat_info.float()**freq_weight
return freq_weight
def get_fed_loss_inds(labels, num_sample_cats, C, weight=None):
appeared = torch.unique(labels) # C'
prob = appeared.new_ones(C + 1).float()
prob[-1] = 0
if len(appeared) < num_sample_cats:
if weight is not None:
prob[:C] = weight.float().clone()
prob[appeared] = 0
more_appeared = torch.multinomial(
prob, num_sample_cats - len(appeared), replacement=False)
appeared = torch.cat([appeared, more_appeared])
return appeared
@MODELS.register_module()
class DeticBBoxHead(Shared2FCBBoxHead):
def __init__(self,
image_loss_weight: float = 0.1,
use_fed_loss: bool = False,
cat_freq_path: str = '',
fed_loss_freq_weight: float = 0.5,
fed_loss_num_cat: int = 50,
cls_predictor_cfg: ConfigType = dict(
type='ZeroShotClassifier'),
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
# reconstruct fc_cls and fc_reg since input channels are changed
assert self.with_cls
self.cls_predictor_cfg = cls_predictor_cfg
cls_channels = self.num_classes
self.cls_predictor_cfg.update(
in_features=self.cls_last_dim, out_features=cls_channels)
self.fc_cls = MODELS.build(self.cls_predictor_cfg)
self.init_cfg += [
dict(type='Caffe2Xavier', override=dict(name='reg_fcs'))
]
self.image_loss_weight = image_loss_weight
self.use_fed_loss = use_fed_loss
self.cat_freq_path = cat_freq_path
self.fed_loss_freq_weight = fed_loss_freq_weight
self.fed_loss_num_cat = fed_loss_num_cat
if self.use_fed_loss:
freq_weight = load_class_freq(cat_freq_path, fed_loss_freq_weight)
self.register_buffer('freq_weight', freq_weight)
else:
self.freq_weight = None
def _predict_by_feat_single(
self,
roi: Tensor,
cls_score: Tensor,
bbox_pred: Tensor,
img_meta: dict,
| rescale: bool = False, |
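A short, self-contained sketch (plain PyTorch, no repository imports) of the federated-loss sampling idea shown in get_fed_loss_inds above: classes that appear in the batch labels are always kept, and extra classes are drawn according to an optional frequency weight. The toy labels and category count are invented for illustration.

import torch

def fed_loss_inds_sketch(labels, num_sample_cats, C, weight=None):
    appeared = torch.unique(labels)              # classes present in this batch
    prob = appeared.new_ones(C + 1).float()
    prob[-1] = 0                                 # never sample the background slot
    if len(appeared) < num_sample_cats:
        if weight is not None:
            prob[:C] = weight.float().clone()    # bias sampling by class frequency
        prob[appeared] = 0                       # do not re-draw classes already present
        extra = torch.multinomial(prob, num_sample_cats - len(appeared), replacement=False)
        appeared = torch.cat([appeared, extra])
    return appeared

print(fed_loss_inds_sketch(torch.tensor([2, 2, 5]), num_sample_cats=4, C=10))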
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: GaoShuang98/DINO-Mix
# Path: models/backbones/facebookresearch_dinov2_main/dinov2/dinov2/data/datasets/image_net.py
class ImageNet(ExtendedVisionDataset):
Target = Union[_Target]
Split = Union[_Split]
def __init__(
self,
*,
split: "ImageNet.Split",
root: str,
extra: str,
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms, transform, target_transform)
self._extra_root = extra
self._split = split
self._entries = None
self._class_ids = None
self._class_names = None
@property
def split(self) -> "ImageNet.Split":
return self._split
def _get_extra_full_path(self, extra_path: str) -> str:
return os.path.join(self._extra_root, extra_path)
def _load_extra(self, extra_path: str) -> np.ndarray:
extra_full_path = self._get_extra_full_path(extra_path)
return np.load(extra_full_path, mmap_mode="r")
def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
extra_full_path = self._get_extra_full_path(extra_path)
os.makedirs(self._extra_root, exist_ok=True)
np.save(extra_full_path, extra_array)
@property
def _entries_path(self) -> str:
return f"entries-{self._split.value.upper()}.npy"
@property
def _class_ids_path(self) -> str:
return f"class-ids-{self._split.value.upper()}.npy"
@property
def _class_names_path(self) -> str:
return f"class-names-{self._split.value.upper()}.npy"
def _get_entries(self) -> np.ndarray:
if self._entries is None:
self._entries = self._load_extra(self._entries_path)
assert self._entries is not None
return self._entries
def _get_class_ids(self) -> np.ndarray:
if self._split == _Split.TEST:
assert False, "Class IDs are not available in TEST split"
if self._class_ids is None:
self._class_ids = self._load_extra(self._class_ids_path)
assert self._class_ids is not None
return self._class_ids
def _get_class_names(self) -> np.ndarray:
if self._split == _Split.TEST:
assert False, "Class names are not available in TEST split"
if self._class_names is None:
self._class_names = self._load_extra(self._class_names_path)
assert self._class_names is not None
return self._class_names
def find_class_id(self, class_index: int) -> str:
class_ids = self._get_class_ids()
return str(class_ids[class_index])
def find_class_name(self, class_index: int) -> str:
class_names = self._get_class_names()
return str(class_names[class_index])
def get_image_data(self, index: int) -> bytes:
entries = self._get_entries()
actual_index = entries[index]["actual_index"]
class_id = self.get_class_id(index)
image_relpath = self.split.get_image_relpath(actual_index, class_id)
image_full_path = os.path.join(self.root, image_relpath)
with open(image_full_path, mode="rb") as f:
image_data = f.read()
return image_data
def get_target(self, index: int) -> Optional[Target]:
entries = self._get_entries()
class_index = entries[index]["class_index"]
return None if self.split == _Split.TEST else int(class_index)
def get_targets(self) -> Optional[np.ndarray]:
entries = self._get_entries()
return None if self.split == _Split.TEST else entries["class_index"]
def get_class_id(self, index: int) -> Optional[str]:
entries = self._get_entries()
class_id = entries[index]["class_id"]
return None if self.split == _Split.TEST else str(class_id)
def get_class_name(self, index: int) -> Optional[str]:
entries = self._get_entries()
class_name = entries[index]["class_name"]
return None if self.split == _Split.TEST else str(class_name)
def __len__(self) -> int:
entries = self._get_entries()
assert len(entries) == self.split.length
return len(entries)
def _load_labels(self, labels_path: str) -> List[Tuple[str, str]]:
labels_full_path = os.path.join(self.root, labels_path)
labels = []
try:
with open(labels_full_path, "r") as f:
reader = csv.reader(f)
for row in reader:
class_id, class_name = row
labels.append((class_id, class_name))
except OSError as e:
raise RuntimeError(f'can not read labels file "{labels_full_path}"') from e
return labels
def _dump_entries(self) -> None:
split = self.split
if split == ImageNet.Split.TEST:
dataset = None
sample_count = split.length
max_class_id_length, max_class_name_length = 0, 0
else:
labels_path = "labels.txt"
logger.info(f'loading labels from "{labels_path}"')
labels = self._load_labels(labels_path)
# NOTE: Using torchvision ImageFolder for consistency
from torchvision.datasets import ImageFolder
dataset_root = os.path.join(self.root, split.get_dirname())
dataset = ImageFolder(dataset_root)
sample_count = len(dataset)
max_class_id_length, max_class_name_length = -1, -1
for sample in dataset.samples:
_, class_index = sample
class_id, class_name = labels[class_index]
max_class_id_length = max(len(class_id), max_class_id_length)
max_class_name_length = max(len(class_name), max_class_name_length)
dtype = np.dtype(
[
("actual_index", "<u4"),
("class_index", "<u4"),
("class_id", f"U{max_class_id_length}"),
("class_name", f"U{max_class_name_length}"),
]
)
entries_array = np.empty(sample_count, dtype=dtype)
if split == ImageNet.Split.TEST:
old_percent = -1
for index in range(sample_count):
percent = 100 * (index + 1) // sample_count
if percent > old_percent:
logger.info(f"creating entries: {percent}%")
old_percent = percent
actual_index = index + 1
class_index = np.uint32(-1)
class_id, class_name = "", ""
entries_array[index] = (actual_index, class_index, class_id, class_name)
else:
class_names = {class_id: class_name for class_id, class_name in labels}
assert dataset
old_percent = -1
for index in range(sample_count):
percent = 100 * (index + 1) // sample_count
if percent > old_percent:
logger.info(f"creating entries: {percent}%")
old_percent = percent
image_full_path, class_index = dataset.samples[index]
image_relpath = os.path.relpath(image_full_path, self.root)
class_id, actual_index = split.parse_image_relpath(image_relpath)
class_name = class_names[class_id]
entries_array[index] = (actual_index, class_index, class_id, class_name)
logger.info(f'saving entries to "{self._entries_path}"')
self._save_extra(entries_array, self._entries_path)
def _dump_class_ids_and_names(self) -> None:
split = self.split
if split == ImageNet.Split.TEST:
return
entries_array = self._load_extra(self._entries_path)
max_class_id_length, max_class_name_length, max_class_index = -1, -1, -1
for entry in entries_array:
class_index, class_id, class_name = (
entry["class_index"],
entry["class_id"],
entry["class_name"],
)
max_class_index = max(int(class_index), max_class_index)
max_class_id_length = max(len(str(class_id)), max_class_id_length)
max_class_name_length = max(len(str(class_name)), max_class_name_length)
class_count = max_class_index + 1
class_ids_array = np.empty(class_count, dtype=f"U{max_class_id_length}")
class_names_array = np.empty(class_count, dtype=f"U{max_class_name_length}")
for entry in entries_array:
class_index, class_id, class_name = (
entry["class_index"],
entry["class_id"],
entry["class_name"],
)
class_ids_array[class_index] = class_id
class_names_array[class_index] = class_name
logger.info(f'saving class IDs to "{self._class_ids_path}"')
self._save_extra(class_ids_array, self._class_ids_path)
logger.info(f'saving class names to "{self._class_names_path}"')
self._save_extra(class_names_array, self._class_names_path)
def dump_extra(self) -> None:
self._dump_entries()
self._dump_class_ids_and_names()
# Path: models/backbones/facebookresearch_dinov2_main/dinov2/dinov2/data/datasets/image_net_22k.py
class ImageNet22k(ExtendedVisionDataset):
_GZIPPED_INDICES: Set[int] = {
841_545,
1_304_131,
2_437_921,
2_672_079,
2_795_676,
2_969_786,
6_902_965,
6_903_550,
6_903_628,
7_432_557,
7_432_589,
7_813_809,
8_329_633,
10_296_990,
10_417_652,
10_492_265,
10_598_078,
10_782_398,
10_902_612,
11_203_736,
11_342_890,
11_397_596,
11_589_762,
11_705_103,
12_936_875,
13_289_782,
}
Labels = _Labels
def __init__(
self,
*,
root: str,
extra: str,
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
mmap_cache_size: int = _DEFAULT_MMAP_CACHE_SIZE,
) -> None:
super().__init__(root, transforms, transform, target_transform)
self._extra_root = extra
entries_path = self._get_entries_path(root)
self._entries = self._load_extra(entries_path)
class_ids_path = self._get_class_ids_path(root)
self._class_ids = self._load_extra(class_ids_path)
self._gzipped_indices = ImageNet22k._GZIPPED_INDICES
self._mmap_tarball = _make_mmap_tarball(self._tarballs_root, mmap_cache_size)
def _get_entries_path(self, root: Optional[str] = None) -> str:
return "entries.npy"
def _get_class_ids_path(self, root: Optional[str] = None) -> str:
return "class-ids.npy"
def _find_class_ids(self, path: str) -> List[str]:
class_ids = []
with os.scandir(path) as entries:
for entry in entries:
root, ext = os.path.splitext(entry.name)
if ext != ".tar":
continue
class_ids.append(root)
return sorted(class_ids)
def _load_entries_class_ids(self, root: Optional[str] = None) -> Tuple[List[_Entry], List[str]]:
root = self.get_root(root)
entries: List[_Entry] = []
class_ids = self._find_class_ids(root)
for class_index, class_id in enumerate(class_ids):
path = os.path.join(root, "blocks", f"{class_id}.log")
class_entries = []
try:
with open(path) as f:
for line in f:
line = line.rstrip()
block, filename = line.split(":")
block_offset = int(block[6:])
filename = filename[1:]
maybe_filename = None
if filename != "** Block of NULs **":
maybe_filename = filename
_, ext = os.path.splitext(filename)
# assert ext == ".JPEG"
class_entry = _ClassEntry(block_offset, maybe_filename)
class_entries.append(class_entry)
except OSError as e:
raise RuntimeError(f'can not read blocks file "{path}"') from e
assert class_entries[-1].maybe_filename is None
for class_entry1, class_entry2 in zip(class_entries, class_entries[1:]):
assert class_entry1.block_offset <= class_entry2.block_offset
start_offset = 512 * class_entry1.block_offset
end_offset = 512 * class_entry2.block_offset
assert class_entry1.maybe_filename is not None
filename = class_entry1.maybe_filename
entry = _Entry(class_index, start_offset, end_offset, filename)
# Skip invalid image files (PIL throws UnidentifiedImageError)
if filename == "n06470073_47249.JPEG":
continue
entries.append(entry)
return entries, class_ids
def _load_extra(self, extra_path: str) -> np.ndarray:
extra_root = self._extra_root
extra_full_path = os.path.join(extra_root, extra_path)
return np.load(extra_full_path, mmap_mode="r")
def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
extra_root = self._extra_root
extra_full_path = os.path.join(extra_root, extra_path)
os.makedirs(extra_root, exist_ok=True)
np.save(extra_full_path, extra_array)
@property
def _tarballs_root(self) -> str:
return self.root
def find_class_id(self, class_index: int) -> str:
return str(self._class_ids[class_index])
def get_image_data(self, index: int) -> bytes:
entry = self._entries[index]
class_id = entry["class_id"]
class_mmap = self._mmap_tarball(class_id)
start_offset, end_offset = entry["start_offset"], entry["end_offset"]
try:
mapped_data = class_mmap[start_offset:end_offset]
data = mapped_data[512:] # Skip entry header block
if len(data) >= 2 and tuple(data[:2]) == (0x1F, 0x8B):
assert index in self._gzipped_indices, f"unexpected gzip header for sample {index}"
with GzipFile(fileobj=BytesIO(data)) as g:
data = g.read()
except Exception as e:
raise RuntimeError(f"can not retrieve image data for sample {index} " f'from "{class_id}" tarball') from e
return data
def get_target(self, index: int) -> Any:
return int(self._entries[index]["class_index"])
def get_targets(self) -> np.ndarray:
return self._entries["class_index"]
def get_class_id(self, index: int) -> str:
return str(self._entries[index]["class_id"])
def get_class_ids(self) -> np.ndarray:
return self._entries["class_id"]
def __getitem__(self, index: int) -> Tuple[Any, Any]:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return super().__getitem__(index)
def __len__(self) -> int:
return len(self._entries)
def _dump_entries(self, *args, **kwargs) -> None:
entries, class_ids = self._load_entries_class_ids(*args, **kwargs)
max_class_id_length, max_filename_length, max_class_index = -1, -1, -1
for entry in entries:
class_id = class_ids[entry.class_index]
max_class_index = max(entry.class_index, max_class_index)
max_class_id_length = max(len(class_id), max_class_id_length)
max_filename_length = max(len(entry.filename), max_filename_length)
dtype = np.dtype(
[
("class_index", "<u4"),
("class_id", f"U{max_class_id_length}"),
("start_offset", "<u4"),
("end_offset", "<u4"),
("filename", f"U{max_filename_length}"),
]
)
sample_count = len(entries)
entries_array = np.empty(sample_count, dtype=dtype)
for i, entry in enumerate(entries):
class_index = entry.class_index
class_id = class_ids[class_index]
start_offset = entry.start_offset
end_offset = entry.end_offset
filename = entry.filename
entries_array[i] = (
class_index,
class_id,
start_offset,
end_offset,
filename,
)
entries_path = self._get_entries_path(*args, **kwargs)
self._save_extra(entries_array, entries_path)
def _dump_class_ids(self, *args, **kwargs) -> None:
entries_path = self._get_entries_path(*args, **kwargs)
entries_array = self._load_extra(entries_path)
max_class_id_length, max_class_index = -1, -1
for entry in entries_array:
class_index, class_id = entry["class_index"], entry["class_id"]
max_class_index = max(int(class_index), max_class_index)
max_class_id_length = max(len(str(class_id)), max_class_id_length)
class_ids_array = np.empty(max_class_index + 1, dtype=f"U{max_class_id_length}")
for entry in entries_array:
class_index, class_id = entry["class_index"], entry["class_id"]
class_ids_array[class_index] = class_id
class_ids_path = self._get_class_ids_path(*args, **kwargs)
self._save_extra(class_ids_array, class_ids_path)
def _dump_extra(self, *args, **kwargs) -> None:
        self._dump_entries(*args, **kwargs)
        self._dump_class_ids(*args, **kwargs)
def dump_extra(self, root: Optional[str] = None) -> None:
return self._dump_extra(root)
# Path: models/backbones/facebookresearch_dinov2_main/dinov2/dinov2/data/samplers.py
class EpochSampler(Sampler):
def __init__(
self,
*,
size: int,
sample_count: int,
shuffle: bool = False,
seed: int = 0,
start: Optional[int] = None,
step: Optional[int] = None,
):
self._size = size
self._sample_count = sample_count
self._shuffle = shuffle
self._seed = seed
self._start = distributed.get_global_rank() if start is None else start
self._step = distributed.get_global_size() if step is None else step
self._epoch = 0
def __iter__(self):
count = (self._size + self._sample_count - 1) // self._sample_count
tiled_indices = np.tile(np.arange(self._sample_count), count)
if self._shuffle:
seed = self._seed * self._epoch if self._seed != 0 else self._epoch
rng = np.random.default_rng(seed)
iterable = rng.choice(tiled_indices, self._size, replace=False)
else:
iterable = tiled_indices[: self._size]
yield from itertools.islice(iterable, self._start, None, self._step)
def __len__(self):
return (self._size - self._start + self._step - 1) // self._step
def set_epoch(self, epoch):
self._epoch = epoch
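# Illustrative sketch, not part of samplers.py: the core of EpochSampler.__iter__
# without shuffling is to tile the per-epoch indices up to `size` samples and then
# shard them by (start, step) across ranks. The numbers below are toy values.
import itertools
import numpy as np
toy_size, toy_sample_count, toy_start, toy_step = 10, 4, 0, 2
toy_count = (toy_size + toy_sample_count - 1) // toy_sample_count
toy_tiled = np.tile(np.arange(toy_sample_count), toy_count)[:toy_size]
print(list(itertools.islice(toy_tiled, toy_start, None, toy_step)))  # indices seen by rank 0 of 2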
# Path: models/backbones/facebookresearch_dinov2_main/dinov2/dinov2/data/samplers.py
class InfiniteSampler(Sampler):
def __init__(
self,
*,
sample_count: int,
shuffle: bool = False,
seed: int = 0,
start: Optional[int] = None,
step: Optional[int] = None,
advance: int = 0,
):
self._sample_count = sample_count
self._seed = seed
self._shuffle = shuffle
self._start = distributed.get_global_rank() if start is None else start
self._step = distributed.get_global_size() if step is None else step
self._advance = advance
def __iter__(self):
if self._shuffle:
iterator = self._shuffled_iterator()
else:
iterator = self._iterator()
yield from itertools.islice(iterator, self._advance, None)
def _iterator(self):
assert not self._shuffle
while True:
iterable = range(self._sample_count)
yield from itertools.islice(iterable, self._start, None, self._step)
def _shuffled_iterator(self):
assert self._shuffle
# Instantiate a generator here (rather than in the ctor) to keep the class
# picklable (requirement of mp.spawn)
generator = torch.Generator().manual_seed(self._seed)
while True:
iterable = _generate_randperm_indices(size=self._sample_count, generator=generator)
yield from itertools.islice(iterable, self._start, None, self._step)
# Path: models/backbones/facebookresearch_dinov2_main/dinov2/dinov2/data/samplers.py
class ShardedInfiniteSampler(Sampler):
def __init__(
self,
*,
sample_count: int,
shuffle: bool = False,
seed: int = 0,
start: Optional[int] = None,
step: Optional[int] = None,
advance: int = 0,
use_new_shuffle_tensor_slice: bool = False,
):
self._sample_count = sample_count
self._seed = seed
self._shuffle = shuffle
self._start = distributed.get_global_rank() if start is None else start
self._step = distributed.get_global_size() if step is None else step
self._advance = advance
self._iter_count = 0
self._shuffle_tensor_slice_fn = (
_new_shuffle_tensor_slice if use_new_shuffle_tensor_slice else _shuffle_tensor_slice
)
def __iter__(self):
iter_count = self._advance // self._sample_count
if iter_count > 0:
self._advance -= iter_count * self._sample_count
self._iter_count += iter_count
if self._shuffle:
iterator = self._shuffled_iterator()
else:
iterator = self._iterator()
yield from itertools.islice(iterator, self._advance, None)
def _iterator(self):
assert not self._shuffle
while True:
iterable = range(self._sample_count)
yield from itertools.islice(iterable, self._start, None, self._step)
def _shuffled_iterator(self):
assert self._shuffle
        # Instantiate a generator here (rather than in the ctor) to keep the class
# picklable (requirement of mp.spawn)
generator = torch.Generator()
# Always shuffle everything first
generator.manual_seed(self._seed)
dtype = _get_torch_dtype(self._sample_count)
perm = torch.randperm(self._sample_count, dtype=dtype, generator=generator)
while True:
# Re-seed on each iteration to allow skipping whole permutations
seed = _make_seed(self._seed, self._start, self._iter_count)
generator.manual_seed(seed)
iterable = self._shuffle_tensor_slice_fn(
tensor=perm, start=self._start, step=self._step, generator=generator
)
yield from iterable
self._iter_count += 1
# Path: models/backbones/facebookresearch_dinov2_main/dinov2/dinov2/data/loaders.py
import logging
import torch
from enum import Enum
from typing import Any, Callable, List, Optional, TypeVar
from torch.utils.data import Sampler
from .datasets import ImageNet, ImageNet22k
from .samplers import EpochSampler, InfiniteSampler, ShardedInfiniteSampler
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
logger = logging.getLogger("dinov2")
class SamplerType(Enum):
DISTRIBUTED = 0
EPOCH = 1
INFINITE = 2
SHARDED_INFINITE = 3
SHARDED_INFINITE_NEW = 4
def _make_bool_str(b: bool) -> str:
return "yes" if b else "no"
def _make_sample_transform(image_transform: Optional[Callable] = None, target_transform: Optional[Callable] = None):
def transform(sample):
image, target = sample
if image_transform is not None:
image = image_transform(image)
if target_transform is not None:
target = target_transform(target)
return image, target
| return transform |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DQiaole/FlowDiffusion_pytorch
# Path: core/utils/frame_utils.py
TAG_CHAR = np.array([202021.25], np.float32)
def readFlow(fn):
def readPFM(file):
def writeFlow(filename,uv,v=None):
def readFlowKITTI(filename):
def readDispKITTI(filename):
def writeFlowKITTI(filename, uv):
def read_gen(file_name, pil=False):
# Path: core/utils/augmentor.py
class FlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True):
# spatial augmentation params
self.crop_size = crop_size
self.min_scale = min_scale
self.max_scale = max_scale
self.spatial_aug_prob = 0.8
self.stretch_prob = 0.8
self.max_stretch = 0.2
# flip augmentation params
self.do_flip = do_flip
self.h_flip_prob = 0.5
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14)
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
""" Photometric augmentation """
# asymmetric
if np.random.rand() < self.asymmetric_color_aug_prob:
img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8)
img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8)
# symmetric
else:
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2, bounds=[50, 100]):
""" Occlusion augmentation """
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(bounds[0], bounds[1])
dy = np.random.randint(bounds[0], bounds[1])
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
def spatial_transform(self, img1, img2, flow):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 8) / float(ht),
(self.crop_size[1] + 8) / float(wd))
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = scale
scale_y = scale
if np.random.rand() < self.stretch_prob:
scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_x = np.clip(scale_x, min_scale, None)
scale_y = np.clip(scale_y, min_scale, None)
if np.random.rand() < self.spatial_aug_prob or min_scale > 1:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = flow * [scale_x, scale_y]
if self.do_flip:
if np.random.rand() < self.h_flip_prob: # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
if np.random.rand() < self.v_flip_prob: # v-flip
img1 = img1[::-1, :]
img2 = img2[::-1, :]
flow = flow[::-1, :] * [1.0, -1.0]
if img1.shape[0] == self.crop_size[0]:
y0 = 0
else:
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0])
if img1.shape[1] == self.crop_size[1]:
x0 = 0
else:
x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow
def __call__(self, img1, img2, flow):
img1, img2 = self.color_transform(img1, img2)
img1, img2 = self.eraser_transform(img1, img2)
img1, img2, flow = self.spatial_transform(img1, img2, flow)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
return img1, img2, flow
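# Illustrative usage sketch, not part of augmentor.py: FlowAugmentor can be exercised
# on random toy data, assuming numpy, opencv-python, Pillow and torchvision are
# installed. The shapes below are arbitrary example values.
import numpy as np
toy_aug = FlowAugmentor(crop_size=(368, 496))
toy_img1 = np.random.randint(0, 255, (436, 1024, 3), dtype=np.uint8)
toy_img2 = np.random.randint(0, 255, (436, 1024, 3), dtype=np.uint8)
toy_flow = np.random.randn(436, 1024, 2).astype(np.float32)
toy_img1, toy_img2, toy_flow = toy_aug(toy_img1, toy_img2, toy_flow)
print(toy_img1.shape, toy_flow.shape)  # both spatially cropped to 368 x 496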
# Path: core/utils/augmentor.py
class SparseFlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False):
# spatial augmentation params
self.crop_size = crop_size
self.min_scale = min_scale
self.max_scale = max_scale
self.spatial_aug_prob = 0.8
self.stretch_prob = 0.8
self.max_stretch = 0.2
# flip augmentation params
self.do_flip = do_flip
self.h_flip_prob = 0.5
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14)
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2):
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(50, 100)
dy = np.random.randint(50, 100)
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
ht, wd = flow.shape[:2]
coords = np.meshgrid(np.arange(wd), np.arange(ht))
coords = np.stack(coords, axis=-1)
coords = coords.reshape(-1, 2).astype(np.float32)
flow = flow.reshape(-1, 2).astype(np.float32)
valid = valid.reshape(-1).astype(np.float32)
coords0 = coords[valid>=1]
flow0 = flow[valid>=1]
ht1 = int(round(ht * fy))
wd1 = int(round(wd * fx))
coords1 = coords0 * [fx, fy]
flow1 = flow0 * [fx, fy]
xx = np.round(coords1[:,0]).astype(np.int32)
yy = np.round(coords1[:,1]).astype(np.int32)
v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
xx = xx[v]
yy = yy[v]
flow1 = flow1[v]
flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
valid_img = np.zeros([ht1, wd1], dtype=np.int32)
flow_img[yy, xx] = flow1
valid_img[yy, xx] = 1
return flow_img, valid_img
def spatial_transform(self, img1, img2, flow, valid, centre_crop, resize):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 1) / float(ht),
(self.crop_size[1] + 1) / float(wd))
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = np.clip(scale, min_scale, None)
scale_y = np.clip(scale, min_scale, None)
if np.random.rand() < self.spatial_aug_prob or min_scale > 1 or resize:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
if self.do_flip:
if np.random.rand() < 0.5: # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
valid = valid[:, ::-1]
margin_y = 20
margin_x = 50
if centre_crop:
y0 = (img1.shape[0] - self.crop_size[0]) // 2
x0 = (img1.shape[1] - self.crop_size[1]) // 2
else:
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)
y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow, valid
def __call__(self, img1, img2, flow, valid, centre_crop=False, resize=False):
img1, img2 = self.color_transform(img1, img2)
img1, img2 = self.eraser_transform(img1, img2)
img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid, centre_crop, resize)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
valid = np.ascontiguousarray(valid)
return img1, img2, flow, valid
# Path: core/augmentations/augmentations.py
def get_augmentation_fn(aug_params):
aug_name = aug_params.name
if aug_name not in ALL_AUGMENTATIONS.keys():
raise NotImplementedError(
'Unrecognized augmentation: {}'.format(aug_name))
aug_fn = ALL_AUGMENTATIONS[aug_name]
return functools.partial(aug_fn, aug_params=aug_params)
# Path: core/augmentations/aug_params.py
def get_params(name):
aug_params = ml_collections.ConfigDict()
aug_params.name = name
"""Parameters controlling data augmentation."""
aug_params.crop_height = 320
aug_params.crop_width = 448
aug_params.eval_crop_height = 320
aug_params.eval_crop_width = 768
aug_params.noise_std_range = 0.06 # range for sampling std of additive noise
aug_params.crop_range_delta = 0.03 # range of relative translation of image 2
aug_params.flow_interpolation = "BILINEAR" # "NEAREST"
# control params
aug_params.is_schedule_coeff = True # schedule aug coeff for image 2
aug_params.schedule_coeff = 1.0
aug_params.is_channel_swapping = False # True: random swapping color channels
aug_params.is_augment_colors = True
aug_params.is_augment_spatial = True
aug_params.disable_ground_truth = False # True: set ground truth to invalid for semi-supervised training
aug_params.black = False # True: allow out-of-boundary cropping (Chairs)
aug_params.prob_hard_sample = 1.0 # probability that we use the hard sample technique, see line 87 in https://github.com/gengshan-y/VCN/blob/master/dataloader/robloader.py
aug_params.is_random_erasing = False
# spatial params
aug_params.min_scale = 0.2
aug_params.max_scale = 1.0
aug_params.vflip_prob = 0.0
aug_params.rot1 = 0.4
aug_params.squeeze1 = 0.3
aug_params.scale1 = 0.3
aug_params.tran1 = 0.4
aug_params.scale2 = 0.1
aug_params.lmult_factor = 1.
aug_params.sat_factor = 1.
aug_params.col_factor = 1.
aug_params.ladd_factor = 1.
aug_params.col_rot_factor = 1.
return aug_params
# Path: core/datasets_return_dict.py
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F
import cv2
import os
import math
import random
import os.path as osp
from torch.utils.data.distributed import DistributedSampler
from glob import glob
from .utils import frame_utils
from .utils.augmentor import FlowAugmentor, SparseFlowAugmentor
from .augmentations.augmentations import get_augmentation_fn as it_get_augmentation_fn
from .augmentations.aug_params import get_params as it_get_params
# Data loading based on https://github.com/NVIDIA/flownet2-pytorch
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
class FlowDataset(data.Dataset):
def __init__(self, aug_params=None, sparse=False, it_aug=False, resize_for_test=False, kitti_format=False, n_sample=None):
self.augmentor = None
self.sparse = sparse
self.it_aug = it_aug
self.resize_for_test = resize_for_test
self.kitti_format = kitti_format
self.n_sample_per_scene = n_sample
if aug_params is not None:
if not it_aug and 'add_gaussian_noise' in aug_params:
aug_params.pop('add_gaussian_noise')
if it_aug:
params = it_get_params('pwc')
if not aug_params['add_gaussian_noise']:
params.noise_std_range = 0.0
print('params.noise_std_range:', params.noise_std_range)
self.augmentor = it_get_augmentation_fn(params)
elif sparse:
self.augmentor = SparseFlowAugmentor(**aug_params)
else:
self.augmentor = FlowAugmentor(**aug_params)
self.is_test = False
self.init_seed = False
self.flow_list = []
self.image_list = []
| self.extra_info = [] |
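A minimal, self-contained sketch (not repository code) of the parallel-list pattern the predicted line completes: the base dataset keeps image pairs, flow paths and per-sample extra info in lists of equal length, which concrete datasets append to sample by sample. Names and values below are illustrative only.

class ToyFlowDataset:
    def __init__(self):
        self.flow_list = []
        self.image_list = []
        self.extra_info = []  # e.g. (scene_name, frame_index) per sample

    def add_sample(self, image_pair, flow_path, info):
        self.image_list.append(image_pair)
        self.flow_list.append(flow_path)
        self.extra_info.append(info)

toy = ToyFlowDataset()
toy.add_sample(("frame_0.png", "frame_1.png"), "frame_0.flo", ("scene_a", 0))
print(len(toy.image_list), toy.extra_info[0])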
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Haoming02/sd-webui-old-photo-restoration
# Path: Global/test.py
def global_test(custom_args:list, ckpt_dir:str):
opt = TestOptions().parse(custom_args, save=False)
parameter_set(opt, ckpt_dir)
model = Pix2PixHDModel_Mapping()
model.initialize(opt)
model.eval()
if not os.path.exists(opt.outputs_dir + "/" + "input_image"):
os.makedirs(opt.outputs_dir + "/" + "input_image")
if not os.path.exists(opt.outputs_dir + "/" + "restored_image"):
os.makedirs(opt.outputs_dir + "/" + "restored_image")
if not os.path.exists(opt.outputs_dir + "/" + "origin"):
os.makedirs(opt.outputs_dir + "/" + "origin")
dataset_size = 0
input_loader = os.listdir(opt.test_input)
dataset_size = len(input_loader)
input_loader.sort()
if opt.test_mask != "":
mask_loader = os.listdir(opt.test_mask)
dataset_size = len(os.listdir(opt.test_mask))
mask_loader.sort()
img_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
mask_transform = transforms.ToTensor()
for i in range(dataset_size):
input_name = input_loader[i]
input_file = os.path.join(opt.test_input, input_name)
if not os.path.isfile(input_file):
print("Skipping non-file %s" % input_name)
continue
input = Image.open(input_file).convert("RGB")
print("Now you are processing %s" % (input_name))
if opt.NL_use_mask:
mask_name = mask_loader[i]
mask = Image.open(os.path.join(opt.test_mask, mask_name)).convert("RGB")
if opt.mask_dilation != 0:
kernel = np.ones((3,3),np.uint8)
mask = np.array(mask)
mask = cv2.dilate(mask,kernel,iterations = opt.mask_dilation)
mask = Image.fromarray(mask.astype('uint8'))
origin = input
input = irregular_hole_synthesize(input, mask)
mask = mask_transform(mask)
mask = mask[:1, :, :] ## Convert to single channel
mask = mask.unsqueeze(0)
input = img_transform(input)
input = input.unsqueeze(0)
else:
if opt.test_mode == "Scale":
input = data_transforms(input, scale=True)
if opt.test_mode == "Full":
input = data_transforms(input, scale=False)
if opt.test_mode == "Crop":
input = data_transforms_rgb_old(input)
origin = input
input = img_transform(input)
input = input.unsqueeze(0)
mask = torch.zeros_like(input)
### Necessary input
try:
with torch.no_grad():
generated = model.inference(input, mask)
except Exception as ex:
print("Skip %s due to an error:\n%s" % (input_name, str(ex)))
continue
if input_name.endswith(".jpg"):
input_name = input_name[:-4] + ".png"
image_grid = vutils.save_image(
(input + 1.0) / 2.0,
opt.outputs_dir + "/input_image/" + input_name,
nrow=1,
padding=0,
normalize=True,
)
image_grid = vutils.save_image(
(generated.data.cpu() + 1.0) / 2.0,
opt.outputs_dir + "/restored_image/" + input_name,
nrow=1,
padding=0,
normalize=True,
)
origin.save(opt.outputs_dir + "/origin/" + input_name)
# Path: Global/detection.py
def global_detection(custom_args:list):
parser = argparse.ArgumentParser()
parser.add_argument("--GPU", type=int, default=0)
parser.add_argument("--test_path", type=str, default=".")
parser.add_argument("--output_dir", type=str, default=".")
parser.add_argument("--input_size", type=str, default="scale_256", help="resize_256|full_size|scale_256")
config = parser.parse_args(custom_args)
main(config)
# Path: Face_Detection/detect_all_dlib.py
def detect(custom_args:list):
parser = argparse.ArgumentParser()
parser.add_argument("--url", type=str, default="", help="input")
parser.add_argument("--save_url", type=str, default="", help="output")
opts = parser.parse_args(custom_args)
url = opts.url
save_url = opts.save_url
os.makedirs(url, exist_ok=True)
os.makedirs(save_url, exist_ok=True)
face_detector = dlib.get_frontal_face_detector()
landmark = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'shape_predictor_68_face_landmarks.dat')
landmark_locator = dlib.shape_predictor(landmark)
count = 0
map_id = {}
for x in os.listdir(url):
img_url = os.path.join(url, x)
pil_img = Image.open(img_url).convert("RGB")
image = np.array(pil_img)
start = time.time()
faces = face_detector(image)
done = time.time()
if len(faces) == 0:
print("Warning: There is no face in %s" % (x))
continue
print(len(faces))
if len(faces) > 0:
for face_id in range(len(faces)):
current_face = faces[face_id]
face_landmarks = landmark_locator(image, current_face)
current_fl = search(face_landmarks)
affine = compute_transformation_matrix(image, current_fl, False, target_face_scale=1.3)
aligned_face = warp(image, affine, output_shape=(256, 256, 3))
img_name = x[:-4] + "_" + str(face_id + 1)
io.imsave(os.path.join(save_url, img_name + ".png"), img_as_ubyte(aligned_face))
count += 1
if count % 1000 == 0:
print("%d have finished ..." % (count))
# Path: Face_Detection/detect_all_dlib_HR.py
def detect_hr(custom_args:list):
parser = argparse.ArgumentParser()
parser.add_argument("--url", type=str, default="", help="input")
parser.add_argument("--save_url", type=str, default="", help="output")
opts = parser.parse_args(custom_args)
url = opts.url
save_url = opts.save_url
os.makedirs(url, exist_ok=True)
os.makedirs(save_url, exist_ok=True)
face_detector = dlib.get_frontal_face_detector()
landmark = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'shape_predictor_68_face_landmarks.dat')
landmark_locator = dlib.shape_predictor(landmark)
count = 0
map_id = {}
for x in os.listdir(url):
img_url = os.path.join(url, x)
pil_img = Image.open(img_url).convert("RGB")
image = np.array(pil_img)
start = time.time()
faces = face_detector(image)
done = time.time()
if len(faces) == 0:
print("Warning: There is no face in %s" % (x))
continue
print(len(faces))
if len(faces) > 0:
for face_id in range(len(faces)):
current_face = faces[face_id]
face_landmarks = landmark_locator(image, current_face)
current_fl = search(face_landmarks)
affine = compute_transformation_matrix(image, current_fl, False, target_face_scale=1.3)
aligned_face = warp(image, affine, output_shape=(512, 512, 3))
img_name = x[:-4] + "_" + str(face_id + 1)
io.imsave(os.path.join(save_url, img_name + ".png"), img_as_ubyte(aligned_face))
count += 1
if count % 1000 == 0:
print("%d have finished ..." % (count))
# Path: Face_Detection/align_warp_back_multiple_dlib.py
def align_warp(custom_args:list):
parser = argparse.ArgumentParser()
parser.add_argument("--origin_url", type=str, default="./", help="origin images")
parser.add_argument("--replace_url", type=str, default="./", help="restored faces")
parser.add_argument("--save_url", type=str, default="./save")
opts = parser.parse_args(custom_args)
origin_url = opts.origin_url
replace_url = opts.replace_url
save_url = opts.save_url
if not os.path.exists(save_url):
os.makedirs(save_url)
face_detector = dlib.get_frontal_face_detector()
landmark = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'shape_predictor_68_face_landmarks.dat')
landmark_locator = dlib.shape_predictor(landmark)
count = 0
for x in os.listdir(origin_url):
img_url = os.path.join(origin_url, x)
pil_img = Image.open(img_url).convert("RGB")
origin_width, origin_height = pil_img.size
image = np.array(pil_img)
start = time.time()
faces = face_detector(image)
done = time.time()
if len(faces) == 0:
print("Warning: There is no face in %s" % (x))
continue
blended = image
for face_id in range(len(faces)):
current_face = faces[face_id]
face_landmarks = landmark_locator(image, current_face)
current_fl = search(face_landmarks)
forward_mask = np.ones_like(image).astype("uint8")
affine = compute_transformation_matrix(image, current_fl, False, target_face_scale=1.3)
aligned_face = warp(image, affine, output_shape=(256, 256, 3), preserve_range=True)
forward_mask = warp(
forward_mask, affine, output_shape=(256, 256, 3), order=0, preserve_range=True
)
affine_inverse = affine.inverse
cur_face = aligned_face
if replace_url != "":
face_name = x[:-4] + "_" + str(face_id + 1) + ".png"
cur_url = os.path.join(replace_url, face_name)
restored_face = Image.open(cur_url).convert("RGB")
restored_face = np.array(restored_face)
cur_face = restored_face
## Histogram Color matching
A = cv2.cvtColor(aligned_face.astype("uint8"), cv2.COLOR_RGB2BGR)
B = cv2.cvtColor(cur_face.astype("uint8"), cv2.COLOR_RGB2BGR)
B = match_histograms(B, A)
cur_face = cv2.cvtColor(B.astype("uint8"), cv2.COLOR_BGR2RGB)
warped_back = warp(
cur_face,
affine_inverse,
output_shape=(origin_height, origin_width, 3),
order=3,
preserve_range=True,
)
backward_mask = warp(
forward_mask,
affine_inverse,
output_shape=(origin_height, origin_width, 3),
order=0,
preserve_range=True,
) ## Nearest neighbour
blended = blur_blending_cv2(warped_back, blended, backward_mask)
blended *= 255.0
io.imsave(os.path.join(save_url, x), img_as_ubyte(blended / 255.0))
count += 1
if count % 1000 == 0:
print("%d have finished ..." % (count))
# Path: Face_Detection/align_warp_back_multiple_dlib_HR.py
def align_warp_hr(custom_args:list):
parser = argparse.ArgumentParser()
parser.add_argument("--origin_url", type=str, default="./", help="origin images")
parser.add_argument("--replace_url", type=str, default="./", help="restored faces")
parser.add_argument("--save_url", type=str, default="./save")
opts = parser.parse_args(custom_args)
origin_url = opts.origin_url
replace_url = opts.replace_url
save_url = opts.save_url
if not os.path.exists(save_url):
os.makedirs(save_url)
face_detector = dlib.get_frontal_face_detector()
landmark = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'shape_predictor_68_face_landmarks.dat')
landmark_locator = dlib.shape_predictor(landmark)
count = 0
for x in os.listdir(origin_url):
img_url = os.path.join(origin_url, x)
pil_img = Image.open(img_url).convert("RGB")
origin_width, origin_height = pil_img.size
image = np.array(pil_img)
start = time.time()
faces = face_detector(image)
done = time.time()
if len(faces) == 0:
print("Warning: There is no face in %s" % (x))
continue
blended = image
for face_id in range(len(faces)):
current_face = faces[face_id]
face_landmarks = landmark_locator(image, current_face)
current_fl = search(face_landmarks)
forward_mask = np.ones_like(image).astype("uint8")
affine = compute_transformation_matrix(image, current_fl, False, target_face_scale=1.3)
aligned_face = warp(image, affine, output_shape=(512, 512, 3), preserve_range=True)
forward_mask = warp(
forward_mask, affine, output_shape=(512, 512, 3), order=0, preserve_range=True
)
affine_inverse = affine.inverse
cur_face = aligned_face
if replace_url != "":
face_name = x[:-4] + "_" + str(face_id + 1) + ".png"
cur_url = os.path.join(replace_url, face_name)
restored_face = Image.open(cur_url).convert("RGB")
restored_face = np.array(restored_face)
cur_face = restored_face
## Histogram Color matching
A = cv2.cvtColor(aligned_face.astype("uint8"), cv2.COLOR_RGB2BGR)
B = cv2.cvtColor(cur_face.astype("uint8"), cv2.COLOR_RGB2BGR)
B = match_histograms(B, A)
cur_face = cv2.cvtColor(B.astype("uint8"), cv2.COLOR_BGR2RGB)
warped_back = warp(
cur_face,
affine_inverse,
output_shape=(origin_height, origin_width, 3),
order=3,
preserve_range=True,
)
backward_mask = warp(
forward_mask,
affine_inverse,
output_shape=(origin_height, origin_width, 3),
order=0,
preserve_range=True,
) ## Nearest neighbour
blended = blur_blending_cv2(warped_back, blended, backward_mask)
blended *= 255.0
io.imsave(os.path.join(save_url, x), img_as_ubyte(blended / 255.0))
count += 1
if count % 1000 == 0:
print("%d have finished ..." % (count))
# Path: Face_Enhancement/test_face.py
def test_face(custom_args:list):
import sys
cache = sys.argv[1:]
sys.argv[1:] = custom_args
opt = TestOptions().parse()
sys.argv[1:] = cache
dataloader = create_dataloader(opt)
model = Pix2PixModel(opt)
model.eval()
visualizer = Visualizer(opt)
single_save_url = os.path.join(opt.checkpoints_dir, opt.name, opt.results_dir, "each_img")
if not os.path.exists(single_save_url):
os.makedirs(single_save_url)
for i, data_i in enumerate(dataloader):
if i * opt.batchSize >= opt.how_many:
break
generated = model(data_i, mode="inference")
img_path = data_i["path"]
for b in range(generated.shape[0]):
img_name = os.path.split(img_path[b])[-1]
save_img_url = os.path.join(single_save_url, img_name)
vutils.save_image((generated[b] + 1) / 2, save_img_url)
# Path: scripts/main_function.py
from Global.test import global_test
from Global.detection import global_detection
from Face_Detection.detect_all_dlib import detect
from Face_Detection.detect_all_dlib_HR import detect_hr
from Face_Detection.align_warp_back_multiple_dlib import align_warp
from Face_Detection.align_warp_back_multiple_dlib_HR import align_warp_hr
from Face_Enhancement.test_face import test_face
from modules import scripts
import datetime
import shutil
import os
import torch
return False
assert input_path != output_path
try:
assert (os.path.isabs(input_path) and os.path.isabs(output_path))
except AssertionError:
print('Path is not Absolute...')
return False
if not os.path.exists(input_path):
print('Input Path does not Exist!')
return False
if not os.path.exists(output_path):
print('Output Path does not Exist!')
return False
if len(input_path.split()) > 1:
print('Empty spaces detected in Input Path!')
return False
if len(output_path.split()) > 1:
print('Empty spaces detected in Output Path!')
return False
if len(os.listdir(input_path)) == 0:
print('No files found in Input Path!')
return False
return True
def core_functions(input_path:str, output_path:str, gpu_id:int, scratch:bool, hr:bool, face_res:bool):
final_output = os.path.join(output_path, 'final_output')
if not os.path.exists(final_output):
os.makedirs(final_output)
if not torch.cuda.is_available():
gpu_id = -1
# ===== Stage 1 =====
print("Running Stage 1: Overall restoration")
stage1_output = os.path.join(output_path, 'stage1')
if not scratch:
args = ['--test_mode', 'Full', '--Quality_restore', '--test_input', input_path, '--outputs_dir', stage1_output, '--gpu_ids', str(gpu_id)]
global_test(args, GLOBAL_CHECKPOINTS_FOLDER)
else:
mask_dir = os.path.join(stage1_output, "masks")
new_input = os.path.join(mask_dir, "input")
new_mask = os.path.join(mask_dir, "mask")
args = ['--test_path', input_path, '--output_dir', mask_dir, '--input_size', 'full_size', '--GPU', str(gpu_id)]
global_detection(args)
args = ['--Scratch_and_Quality_restore', '--test_input', new_input, '--test_mask', new_mask, '--outputs_dir', stage1_output, '--gpu_ids', str(gpu_id)]
if hr:
args.append('--HR')
global_test(args, GLOBAL_CHECKPOINTS_FOLDER)
stage1_results = os.path.join(stage1_output, "restored_image")
for FILE in os.listdir(stage1_results):
shutil.copy(os.path.join(stage1_results, FILE), final_output)
if not face_res:
print("Processing is done. Please check the results.")
return final_output
# ===== Stage 2 =====
print("Running Stage 2: Face Detection")
stage2_output = os.path.join(output_path, 'stage2')
if hr:
detect_hr(['--url', stage1_results, '--save_url', stage2_output])
else:
detect(['--url', stage1_results, '--save_url', stage2_output])
# ===== Stage 3 =====
print("Running Stage 3: Face Enhancement")
stage_3_input_face = stage2_output
stage_3_output_dir = os.path.join(output_path, 'stage3')
if hr:
args = [
'--checkpoints_dir', FACE_CHECKPOINTS_FOLDER, '--old_face_folder', stage_3_input_face,
'--name', FACE_ENHANCEMENT_CHECKPOINTS[1], '--gpu_ids', str(gpu_id),
'--load_size', '512', '--label_nc', '18', '--no_instance', '--preprocess_mode', 'resize',
'--batchSize', '1', '--results_dir', stage_3_output_dir, '--no_parsing_map'
]
else:
args = [
'--checkpoints_dir', FACE_CHECKPOINTS_FOLDER, '--old_face_folder', stage_3_input_face,
'--name', FACE_ENHANCEMENT_CHECKPOINTS[0], '--gpu_ids', str(gpu_id),
'--load_size', '256', '--label_nc', '18', '--no_instance', '--preprocess_mode', 'resize',
'--batchSize', '4', '--results_dir', stage_3_output_dir, '--no_parsing_map'
]
test_face(args)
stage3_results = os.path.join(stage_3_output_dir, "each_img")
# ===== Stage 4 =====
print("Running Stage 4: Blending")
args = ['--origin_url', stage1_results, '--replace_url', stage3_results, '--save_url', final_output]
if hr:
align_warp_hr(args)
else:
align_warp(args)
print("All the processing is done. Please check the results.")
return final_output
def bop(input_path:str, output_path:str, gpu_id:int, scratch:bool, hr:bool, face_res:bool, del_itr:bool):
if not validate_paths(input_path, output_path):
return []
| output_path = os.path.join(output_path, datetime.datetime.now().strftime("%m-%d %H.%M.%S")) |
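# Illustrative aside (not part of the quoted extension code): bop() above nests every run
# inside a freshly timestamped folder under the user-supplied output path. A minimal,
# self-contained sketch of that pattern, using the hypothetical helper name make_run_dir:
import datetime
import os

def make_run_dir(base_output: str) -> str:
    # e.g. <base_output>/01-31 14.05.59, mirroring the strftime pattern used above
    run_dir = os.path.join(base_output, datetime.datetime.now().strftime("%m-%d %H.%M.%S"))
    os.makedirs(run_dir, exist_ok=True)
    return run_dir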
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: EQ-bench/EQ-Bench
# Path: lib/load_model.py
def load_model(base_model_path, lora_path, quantization, trust_remote_code = False):
tokenizer = AutoTokenizer.from_pretrained(base_model_path, trust_remote_code=trust_remote_code)
# This is for llama2 models, but doesn't seem to have
# adverse effects on benchmarks for other models.
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
# Quantization Config
if quantization == '4bit':
# load as 4 bit
quant_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=False
)
elif quantization == '8bit':
# load as 8 bit
quant_config = BitsAndBytesConfig(
load_in_8bit=True,
)
else:
quant_config = None
# Model
if quant_config:
base_model = AutoModelForCausalLM.from_pretrained(
base_model_path,
quantization_config=quant_config,
device_map={"": 0},
trust_remote_code=trust_remote_code
)
else:
base_model = AutoModelForCausalLM.from_pretrained(
base_model_path,
device_map={"": 0},
trust_remote_code=trust_remote_code
)
if lora_path:
peft_model = PeftModel.from_pretrained(base_model, lora_path)
return peft_model, tokenizer
else:
return base_model, tokenizer
# Path: lib/db.py
def save_result_to_db(results, score, parseable, last_error, run_index, bench_success):
global db
if not db:
return
try:
meta = results['run_metadata']
if meta['eq_bench_version'] == 'v1':
n_questions_total = 60
else:
n_questions_total = 171
raw_results = {}
for i in range(meta['total_iterations']):
iter_index = str(i+1)
if iter_index in results['iterations']:
if meta['eq_bench_version'] == 'v1':
individual_scores = results['iterations'][iter_index]['individual_scores']
else:
individual_scores = results['iterations'][iter_index]['individual_scores_fullscale']
raw_results[iter_index] = {
'respondent_answers': results['iterations'][iter_index]['respondent_answers'],
'individual_scores': individual_scores,
'raw_inference': results['iterations'][iter_index]['raw_inference']
}
to_save={
'index_string': run_index,
'run_id': meta['run_id'],
'run_completed': int(time.time()),
'benchmark_success': bench_success,
'eqbench_version': meta['eq_bench_version'],
'n_questions_parseable': parseable,
'n_questions_total': n_questions_total,
'benchmark_score': score,
'instruction_template': meta['instruction_template'],
'model_path': meta['model_path'],
'lora_path': meta['lora_path'],
'bitsandbytes_quant': meta['bitsandbytes_quant'],
'total_iterations': meta['total_iterations'],
'inference_engine': meta['inference_engine'],
'ooba_params': meta['ooba_params'],
'include_patterns': meta['include_patterns'],
'exclude_patterns': meta['exclude_patterns'],
'errors': last_error,
'raw_results': raw_results
}
db.collection("benchmark_results").add(to_save)
print('Results saved to firebase db.')
except Exception as e:
print(e)
print('! Failed to save results to db.')
# Path: lib/scoring.py
def calculate_score(reference, user):
# First check that the emotions specified in the answer match those in the reference
if len(user.items()) != 4:
print('! Error: 4 emotions were not returned')
print(user)
return None
emotions_dict = {}
for emotion, user_emotion_score in user.items():
for i in range(1, 5):
if emotion == reference[f'emotion{i}']:
emotions_dict[emotion] = True
if len(emotions_dict) != 4:
print('! Error: emotions did not match reference')
print(user)
return None
# Normalize the user's scores to sum to 10.
total_user_score = sum(float(score) for score in user.values())
if total_user_score <= 0:
print('Error: total of scores must be > 0')
print(user)
return None
user = {emotion: float(score) / total_user_score * 10 for emotion, score in user.items()}
difference_tally = 0 # Tally of difference from reference answers for this question
# Iterate over each emotion in the user's answers.
for emotion, user_emotion_score in user.items():
# If this emotion is in the reference, calculate the difference between the user's score and the reference score.
for i in range(1, 5):
if emotion == reference[f'emotion{i}']:
difference_tally += abs(user_emotion_score - reference[f'emotion{i}_score'])
# Inverting the difference tally so that the closer the answer is to reference, the higher the score.
# We subtract from 10 because it works out that this constant produces a score of 0 when answering
# randomly, which is a useful floor for the benchmark.
final_score = 10 - difference_tally
return final_score
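# Illustrative worked example (made-up numbers, not from the benchmark data): the v1 rule in
# calculate_score above normalises the four predicted intensities to sum to 10 and then scores
# 10 minus the total absolute deviation from the reference intensities.
reference = {'emotion1': 'joy', 'emotion1_score': 6,
             'emotion2': 'surprise', 'emotion2_score': 2,
             'emotion3': 'fear', 'emotion3_score': 1,
             'emotion4': 'anger', 'emotion4_score': 1}
user = {'joy': 7, 'surprise': 2, 'fear': 1, 'anger': 0}
total = sum(user.values())
normalised = {k: v / total * 10 for k, v in user.items()}
diff = sum(abs(normalised[reference[f'emotion{i}']] - reference[f'emotion{i}_score'])
           for i in range(1, 5))
print(10 - diff)  # 8.0 here; a perfect match would give 10.0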
# Path: lib/scoring.py
def calculate_score_fullscale(reference, user):
# First check that the emotions specified in the answer match those in the reference
if len(user.items()) != 4:
#print('! Error: 4 emotions were not returned')
#print(user)
return None
emotions_dict = {}
for emotion, user_emotion_score in user.items():
for i in range(1, 5):
if emotion == reference[f'emotion{i}']:
emotions_dict[emotion] = True
if len(emotions_dict) != 4:
print('! Error: emotions did not match reference')
print(user)
return None
difference_tally = 0 # Tally of difference from reference answers for this question
# Iterate over each emotion in the user's answers.
for emotion, user_emotion_score in user.items():
# If this emotion is in the reference, calculate the difference between the user's score and the reference score.
for i in range(1, 5):
if emotion == reference[f'emotion{i}']:
d = abs(float(user_emotion_score) - float(reference[f'emotion{i}_score']))
# this will be a value between 0 and 10
if d == 0:
scaled_difference = 0
elif d <= 5:
# S-shaped scaling function
# https://www.desmos.com/calculator
# 6.5\cdot\ \frac{1}{\left(1\ +\ e^{\left(-1.2\cdot\left(x-4\right)\right)}\right)}
scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d-4))))
else:
scaled_difference = d
difference_tally += scaled_difference
# Inverting the difference tally so that the closer the answer is to reference, the higher the score.
# The adjustment constant is chosen such that answering randomly produces a score of zero.
adjust_const = 0.7477
final_score = 10 - (difference_tally * adjust_const)
return final_score
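# Illustrative numeric check of the piecewise scaling used in calculate_score_fullscale above:
# a difference of 0 contributes nothing, differences up to 5 are squashed by the sigmoid
# 6.5 / (1 + e^(-1.2 * (d - 4))), and differences above 5 pass through unchanged.
import math

def scaled_difference(d: float) -> float:
    if d == 0:
        return 0.0
    if d <= 5:
        return 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))
    return d

for d in (0, 1, 3, 5, 7):
    print(d, round(scaled_difference(d), 3))  # small differences are damped, large ones are not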
# Path: lib/scoring.py
def parse_answers(text, REVISE):
first_pass_answers = {}
revised_answers = {}
# Extracting first pass answers
if REVISE:
first_pass_match = re.search(r'First pass scores:(.*?)Revised scores:', text, re.DOTALL)
if first_pass_match:
first_pass_text = first_pass_match.group(1)
first_pass_answers = dict(re.findall(r'(\w+):\s+(\d+)', first_pass_text))
# Extracting revised answers
revised_match = re.search(r'Revised scores:(.*?)$', text, re.DOTALL)
if revised_match:
revised_text = revised_match.group(1)
revised_answers = dict(re.findall(r'(\w+):\s+(\d+)', revised_text))
else:
first_pass_answers = dict(re.findall(r'(\w+):\s+(\d+)', text))
revised_answers = {}
return first_pass_answers, revised_answers
# Path: lib/scoring.py
def calculate_benchmark_score(run_index, results, results_path, fullscale=False):
# We calculate an overall score for first pass answers and revised answers separately.
# The final score is the best of these two numbers.
if fullscale: # v2
scores_key = 'individual_scores_fullscale'
else: # v1 (normalised)
scores_key = 'individual_scores'
score_tally = 0
parseable_tally = 0
n_iterations = len(results[run_index]['iterations'])
for run_iter in results[run_index]['iterations']:
score_sum_first_pass = 0
score_sum_revised = 0
first_pass_parseable = 0
revised_parseable = 0
if not scores_key in results[run_index]['iterations'][run_iter]:
continue
for dialogue_id, r in results[run_index]['iterations'][run_iter][scores_key].items():
r = results[run_index]['iterations'][run_iter][scores_key][dialogue_id]
if 'first_pass_score' in r and r['first_pass_score'] != None:
score_sum_first_pass += r['first_pass_score']
first_pass_parseable += 1
if 'revised_score' in r and r['revised_score'] != None:
score_sum_revised += r['revised_score']
revised_parseable += 1
if first_pass_parseable:
score_first_pass = 100 * (score_sum_first_pass / first_pass_parseable / 10)
else:
score_first_pass = 0
if revised_parseable:
score_revised = 100 * (score_sum_revised / revised_parseable / 10)
else:
score_revised = 0
# If either the first pass or revised score has significantly fewer parseable answers,
# we take the score with the higher number of parseable answers regardless of score.
if score_revised >= score_first_pass and revised_parseable >= 0.95 * first_pass_parseable:
final_score = score_revised
final_parseable = revised_parseable
else:
final_score = score_first_pass
final_parseable = first_pass_parseable
score_tally += final_score
parseable_tally += final_parseable
results_key = 'benchmark_results'
if fullscale:
results_key = 'benchmark_results_fullscale'
results[run_index]['iterations'][run_iter][results_key] = {
'first_pass_score': score_first_pass,
'first_pass_parseable': first_pass_parseable,
'revised_score': score_revised,
'revised_parseable': revised_parseable,
'final_score': final_score,
'final_parseable': final_parseable
}
averaged_score = score_tally / n_iterations
if fullscale:
averaged_score = round(averaged_score, 2)
else:
averaged_score = round(averaged_score, 2)
with open(results_path, 'w') as f:
json.dump(results, f)
return (averaged_score, round(parseable_tally / n_iterations, 2))
# Path: lib/run_query.py
def run_query(model_path, prompt_format, prompt, history, completion_tokens, model, tokenizer, temp, inference_engine, ooba_instance, launch_ooba, ooba_request_timeout, openai_client):
if inference_engine == 'openai':
return run_openai_query(prompt, history, completion_tokens, temp, model_path, openai_client)
elif inference_engine == 'ooba':
return run_ooba_query(prompt, history, prompt_format, completion_tokens, temp, ooba_instance, launch_ooba, ooba_request_timeout)
else: # transformers
# figure out the correct inference method to use
if model_path in OPENSOURCE_MODELS_INFERENCE_METHODS:
inference_fn = OPENSOURCE_MODELS_INFERENCE_METHODS[model_path]
else:
inference_fn = run_pipeline_query
formatted_prompt = generate_prompt_from_template(prompt, prompt_format)
return inference_fn(formatted_prompt, completion_tokens, model, tokenizer, temp)
# Path: lib/util.py
def upload_results_google_sheets(google_spreadsheet_url, result):
match = re.search(r'/spreadsheets/d/([a-zA-Z0-9-_]+)', google_spreadsheet_url)
sheet_id = match.group(1) if match else None
if not sheet_id:
print('! Error: failed to parse sheet id from url')
return
# Load credentials
scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/spreadsheets", "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name('./google_creds.json', scope)
client = gspread.authorize(creds)
sheet = client.open_by_key(sheet_id)
worksheet = sheet.get_worksheet(0)
worksheet.append_row(result)
# Path: lib/util.py
def delete_symlinks_and_dir(dir_to_delete, verbose):
# Check if the directory exists
if not os.path.exists(dir_to_delete):
print(f"Directory {dir_to_delete} does not exist.")
return
# Iterate through the items in the directory
for item in os.listdir(dir_to_delete):
item_path = os.path.join(dir_to_delete, item)
# Check if the item is a symlink
if os.path.islink(item_path):
source_path = os.readlink(item_path)
# Resolve the source path relative to the symlink's directory
if not os.path.isabs(source_path):
source_path = os.path.join(os.path.dirname(item_path), source_path)
# Check if the source file exists and is not a directory
if os.path.exists(source_path) and not os.path.isdir(source_path):
if verbose:
print(f"Deleting source file of symlink: {source_path}")
os.remove(source_path)
else:
print(f"Source file does not exist or is a directory: {source_path}")
# Delete the directory and its contents
shutil.rmtree(dir_to_delete)
if verbose:
print(f"Deleted directory: {dir_to_delete}")
# Path: lib/run_bench.py
import re
import os
import time
import json
import datetime
import lib.ooba
from tqdm import tqdm
from lib.load_model import load_model
from lib.db import save_result_to_db
from lib.scoring import calculate_score, calculate_score_fullscale, parse_answers, calculate_benchmark_score
from lib.run_query import run_query
from lib.util import upload_results_google_sheets, delete_symlinks_and_dir
# Constants
COMPLETION_TOKENS = 1000
RAW_RESULTS_PATH = './raw_results.json'
BENCH_RESULTS_PATH = './benchmark_results.csv'
REVISE=True
def run_benchmark(run_id, model_path, lora_path, prompt_type, quantization,
n_iterations, resume=True, delete_cache=False,
max_bench_retries=5, n_question_attempts=5, verbose=False,
google_spreadsheet_url='', trust_remote_code=False,
| inference_engine='transformers', ooba_instance=None, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Mark12Ding/STA
# Path: kinetics.py
class VideoClsDataset(Dataset):
"""Load your own video classification dataset."""
def __init__(self, anno_path, data_path, mode='train', clip_len=8,
frame_sample_rate=2, crop_size=224, short_side_size=256,
new_height=256, new_width=340, keep_aspect_ratio=True,
num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3, args=None):
self.anno_path = anno_path
self.data_path = data_path
self.mode = mode
self.clip_len = clip_len
self.frame_sample_rate = frame_sample_rate
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.args = args
self.aug = False
self.rand_erase = False
if self.mode in ['train']:
self.aug = True
if self.args.reprob > 0:
self.rand_erase = True
if VideoReader is None:
raise ImportError("Unable to import `decord` which is required to read videos.")
import pandas as pd
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')
self.dataset_samples = list(cleaned.values[:, 0])
self.label_array = list(cleaned.values[:, 1])
# self.dataset_samples = []
# self.label_array = []
# with open(self.anno_path, 'r') as f:
# for line in f:
# data_path, label = line.rstrip().split(' ')
# self.dataset_samples.append(os.path.join(os.path.dirname(self.anno_path), 'videos_val', data_path))
# self.label_array.append(int(label))
if (mode == 'train'):
pass
elif (mode == 'validation'):
self.data_transform = video_transforms.Compose([
video_transforms.Resize(self.short_side_size, interpolation='bilinear'),
video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = video_transforms.Compose([
video_transforms.Resize(size=(short_side_size), interpolation='bilinear')
])
self.data_transform = video_transforms.Compose([
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
sample_label = self.label_array[idx]
self.test_label_array.append(sample_label)
self.test_dataset.append(self.dataset_samples[idx])
self.test_seg.append((ck, cp))
def __getitem__(self, index):
if self.mode == 'train':
args = self.args
scale_t = 1
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during training".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)
if args.num_sample > 1:
frame_list = []
label_list = []
index_list = []
for _ in range(args.num_sample):
new_frames = self._aug_frame(buffer, args)
label = self.label_array[index]
frame_list.append(new_frames)
label_list.append(label)
index_list.append(index)
return frame_list, label_list, index_list, {}
else:
buffer = self._aug_frame(buffer, args)
return buffer, self.label_array[index], index, {}
elif self.mode == 'validation':
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during validation".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split("/")[-1].split(".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \
/ (self.test_num_segment - 1), 0)
temporal_start = int(chunk_nb * temporal_step)
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
raise NameError('mode {} unknown'.format(self.mode))
def _aug_frame(
self,
buffer,
args,
):
aug_transform = video_transforms.create_random_augment(
input_size=(self.crop_size, self.crop_size),
auto_augment=args.aa,
interpolation=args.train_interpolation,
)
buffer = [
transforms.ToPILImage()(frame) for frame in buffer
]
buffer = aug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
# T H W C
buffer = tensor_normalize(
buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
)
# T H W C -> C T H W.
buffer = buffer.permute(3, 0, 1, 2)
# Perform data augmentation.
scl, asp = (
[0.08, 1.0],
[0.75, 1.3333],
)
buffer = spatial_sampling(
buffer,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=self.crop_size,
random_horizontal_flip=False if args.data_set == 'SSV2' else True ,
inverse_uniform_sampling=False,
aspect_ratio=asp,
scale=scl,
motion_shift=False
)
return buffer
def loadvideo_decord(self, sample, sample_rate_scale=1):
"""Load video content using Decord"""
fname = sample
if not (os.path.exists(fname)):
return []
# avoid hanging issue
if os.path.getsize(fname) < 1 * 1024:
print('SKIP: ', fname, " - ", os.path.getsize(fname))
return []
try:
if self.keep_aspect_ratio:
vr = VideoReader(fname, num_threads=1, ctx=cpu(0))
else:
vr = VideoReader(fname, width=self.new_width, height=self.new_height,
num_threads=1, ctx=cpu(0))
except:
print("video cannot be loaded by decord: ", fname)
return []
if self.mode == 'test':
all_index = [x for x in range(0, len(vr), self.frame_sample_rate)]
while len(all_index) < self.clip_len:
all_index.append(all_index[-1])
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
# handle temporal segments
converted_len = int(self.clip_len * self.frame_sample_rate)
seg_len = len(vr) // self.num_segment
all_index = []
for i in range(self.num_segment):
if seg_len <= converted_len:
index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)
index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len))
index = np.clip(index, 0, seg_len - 1).astype(np.int64)
else:
end_idx = np.random.randint(converted_len, seg_len)
str_idx = end_idx - converted_len
index = np.linspace(str_idx, end_idx, num=self.clip_len)
index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)
index = index + i*seg_len
all_index.extend(list(index))
all_index = all_index[::int(sample_rate_scale)]
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
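# Illustrative worked example (not from the repository) of the test-time crop grid computed
# above: the crop origins along the longer spatial side are spaced by
# spatial_step = (long_side - short_side_size) / (test_num_crop - 1).
def crop_origins(long_side: int, short_side_size: int, test_num_crop: int):
    step = (long_side - short_side_size) / (test_num_crop - 1)
    return [int(i * step) for i in range(test_num_crop)]

print(crop_origins(long_side=320, short_side_size=224, test_num_crop=3))  # [0, 48, 96]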
# Path: ssv2.py
class SSVideoClsDataset(Dataset):
"""Load your own video classification dataset."""
def __init__(self, anno_path, data_path, mode='train', clip_len=8,
crop_size=224, short_side_size=256, new_height=256,
new_width=340, keep_aspect_ratio=True, num_segment=1,
num_crop=1, test_num_segment=10, test_num_crop=3, args=None):
self.anno_path = anno_path
self.data_path = data_path
self.mode = mode
self.clip_len = clip_len
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.args = args
self.aug = False
self.rand_erase = False
if self.mode in ['train']:
self.aug = True
if self.args.reprob > 0:
self.rand_erase = True
if VideoReader is None:
raise ImportError("Unable to import `decord` which is required to read videos.")
import pandas as pd
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')
self.dataset_samples = list(cleaned.values[:, 0])
self.label_array = list(cleaned.values[:, 1])
if (mode == 'train'):
pass
elif (mode == 'validation'):
self.data_transform = video_transforms.Compose([
video_transforms.Resize(self.short_side_size, interpolation='bilinear'),
video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = video_transforms.Compose([
video_transforms.Resize(size=(short_side_size), interpolation='bilinear')
])
self.data_transform = video_transforms.Compose([
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
sample_label = self.label_array[idx]
self.test_label_array.append(sample_label)
self.test_dataset.append(self.dataset_samples[idx])
self.test_seg.append((ck, cp))
def __getitem__(self, index):
if self.mode == 'train':
args = self.args
scale_t = 1
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during training".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)
if args.num_sample > 1:
frame_list = []
label_list = []
index_list = []
for _ in range(args.num_sample):
new_frames = self._aug_frame(buffer, args)
label = self.label_array[index]
frame_list.append(new_frames)
label_list.append(label)
index_list.append(index)
return frame_list, label_list, index_list, {}
else:
buffer = self._aug_frame(buffer, args)
return buffer, self.label_array[index], index, {}
elif self.mode == 'validation':
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during validation".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split("/")[-1].split(".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_start = chunk_nb # 0/1
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start::2, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start::2, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
raise NameError('mode {} unknown'.format(self.mode))
def _aug_frame(
self,
buffer,
args,
):
aug_transform = video_transforms.create_random_augment(
input_size=(self.crop_size, self.crop_size),
auto_augment=args.aa,
interpolation=args.train_interpolation,
)
buffer = [
transforms.ToPILImage()(frame) for frame in buffer
]
buffer = aug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
# T H W C
buffer = tensor_normalize(
buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
)
# T H W C -> C T H W.
buffer = buffer.permute(3, 0, 1, 2)
# Perform data augmentation.
scl, asp = (
[0.08, 1.0],
[0.75, 1.3333],
)
buffer = spatial_sampling(
buffer,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=self.crop_size,
random_horizontal_flip=False if args.data_set == 'SSV2' else True,
inverse_uniform_sampling=False,
aspect_ratio=asp,
scale=scl,
motion_shift=False
)
if self.rand_erase:
erase_transform = RandomErasing(
args.reprob,
mode=args.remode,
max_count=args.recount,
num_splits=args.recount,
device="cpu",
)
buffer = buffer.permute(1, 0, 2, 3)
buffer = erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3)
return buffer
def loadvideo_decord(self, sample, sample_rate_scale=1):
"""Load video content using Decord"""
fname = sample
if not (os.path.exists(fname)):
return []
# avoid hanging issue
if os.path.getsize(fname) < 1 * 1024:
print('SKIP: ', fname, " - ", os.path.getsize(fname))
return []
try:
if self.keep_aspect_ratio:
vr = VideoReader(fname, num_threads=1, ctx=cpu(0))
else:
vr = VideoReader(fname, width=self.new_width, height=self.new_height,
num_threads=1, ctx=cpu(0))
except:
print("video cannot be loaded by decord: ", fname)
return []
if self.mode == 'test':
all_index = []
tick = len(vr) / float(self.num_segment)
all_index = list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +
[int(tick * x) for x in range(self.num_segment)]))
while len(all_index) < (self.num_segment * self.test_num_segment):
all_index.append(all_index[-1])
all_index = list(np.sort(np.array(all_index)))
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
# handle temporal segments
average_duration = len(vr) // self.num_segment
all_index = []
if average_duration > 0:
all_index += list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,
size=self.num_segment))
elif len(vr) > self.num_segment:
all_index += list(np.sort(np.random.randint(len(vr), size=self.num_segment)))
else:
all_index += list(np.zeros((self.num_segment,)))
all_index = list(np.array(all_index))
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
# Path: datasets.py
import os
from torchvision import transforms
from transforms import *
from kinetics import VideoClsDataset
from ssv2 import SSVideoClsDataset
def build_dataset(is_train, test_mode, args):
if args.data_set == 'Kinetics-400':
mode = None
anno_path = None
if is_train is True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode is True:
mode = 'test'
# anno_path = os.path.join(args.data_path, 'kinetics400_val_list_videos.txt')
anno_path = os.path.join(args.data_path, 'test.csv')
else:
mode = 'validation'
# anno_path = os.path.join(args.data_path, 'kinetics400_val_list_videos.txt')
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = VideoClsDataset(
anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
nb_classes = 400
elif args.data_set == 'SSV2':
mode = None
anno_path = None
if is_train is True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode is True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'test.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = SSVideoClsDataset(
anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=1,
num_segment=args.num_frames,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
nb_classes = 174
elif args.data_set == 'UCF101':
mode = None
anno_path = None
if is_train is True:
mode = 'train'
anno_path = os.path.join(args.data_path, 'train.csv')
elif test_mode is True:
mode = 'test'
anno_path = os.path.join(args.data_path, 'test.csv')
else:
mode = 'validation'
anno_path = os.path.join(args.data_path, 'val.csv')
dataset = VideoClsDataset(
anno_path=anno_path,
data_path='/',
mode=mode,
clip_len=args.num_frames,
frame_sample_rate=args.sampling_rate,
num_segment=1,
test_num_segment=args.test_num_segment,
test_num_crop=args.test_num_crop,
num_crop=1 if not test_mode else 3,
keep_aspect_ratio=True,
crop_size=args.input_size,
short_side_size=args.short_side_size,
new_height=256,
new_width=320,
args=args)
| nb_classes = 101 |
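# Illustrative sketch (resolve_split is a hypothetical helper, not part of the repository):
# every branch of build_dataset above repeats the same split-to-annotation-file mapping.
import os

def resolve_split(data_path: str, is_train: bool, test_mode: bool):
    if is_train:
        mode, csv_name = 'train', 'train.csv'
    elif test_mode:
        mode, csv_name = 'test', 'test.csv'
    else:
        mode, csv_name = 'validation', 'val.csv'
    return mode, os.path.join(data_path, csv_name)

print(resolve_split('/data/k400', is_train=False, test_mode=False))
# ('validation', '/data/k400/val.csv')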
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SEU-ProactiveSecurity-Group/MalPurifier
# Path: core/attack/base_attack.py
class BaseAttack(Module):
"""
Abstract base class for attacks
Parameters
---------
@param is_attacker, Boolean, play the role of the attacker (note: the defender performs adversarial training)
@param oblivion, Boolean, whether the adversary's indicator is known
@param kappa, float, attack confidence value
@param manipulation_x, boolean vector indicating which APIs can be modified
@param omega, a list of 4 sets, each containing the indices of the interdependent APIs corresponding to each API
@param device, 'cpu' or 'cuda'
"""
def __init__(self, is_attacker=True, oblivion=False, kappa=1., manipulation_x=None, omega=None, device=None):
# Call the parent class constructor
super(BaseAttack, self).__init__()
# Whether this instance plays the attacker
self.is_attacker = is_attacker
# Whether the adversary's indicator is known
self.oblivion = oblivion
# Attack confidence value
self.kappa = kappa
# APIs that can be modified
self.manipulation_x = manipulation_x
# Device to run on, CPU or CUDA (GPU)
self.device = device
# Index sets of the interdependent APIs corresponding to each API
self.omega = omega
# Inverse feature object
self.inverse_feature = InverseDroidFeature()
# Run the initialization
self.initialize()
def initialize(self):
# Check whether the manipulable APIs were specified
if self.manipulation_x is None:
# If not specified, get the default manipulable APIs from inverse_feature
self.manipulation_x = self.inverse_feature.get_manipulation()
# Convert manipulation_x to a LongTensor and move it to the specified device (CPU or GPU)
self.manipulation_x = torch.LongTensor(self.manipulation_x).to(self.device)
# Check whether the interdependent API indices corresponding to each API were specified
if self.omega is None:
# If not specified, get the default interdependent APIs from inverse_feature
self.omega = self.inverse_feature.get_interdependent_apis()
# One-hot encode self.omega and sum over each column, storing the result back into self.omega
# so that each API's entry indicates which APIs it depends on
self.omega = torch.sum(
F.one_hot(torch.tensor(self.omega), num_classes=len(self.inverse_feature.vocab)),
dim=0).to(self.device)
# Get the API flag bits
api_flag = self.inverse_feature.get_api_flag()
# Convert the API flags to a boolean LongTensor and move them to the specified device
self.api_flag = torch.LongTensor(api_flag).bool().to(self.device)
def perturb(self, model, x, adj=None, label=None):
"""
Perturb the node feature vectors
Parameters
--------
@param model: the model under attack
@param x: torch.FloatTensor, node feature vectors, each of which represents an API
@param adj: torch.FloatTensor or None, adjacency matrix (if not None, its shape is [number_of_graphs, batch_size, vocab_dim, vocab_dim])
@param label: torch.LongTensor, the ground-truth labels
"""
# This is an abstract method; concrete implementations are provided by subclasses
raise NotImplementedError
def produce_adv_mal(self, x_mod_list, feature_path_list, app_dir, save_dir=None):
"""
Produce adversarial malware in practice.
Parameters
--------
@param x_mod_list: a list of tensors, each corresponding to the numerical modifications applied to the features.
@param feature_path_list: a list of feature paths, each corresponding to a file in which a call graph is saved.
@param app_dir: a string pointing to the directory of the original malicious apps (or a list of paths).
@param save_dir: the directory used to save the generated APKs.
"""
if len(x_mod_list) <= 0:
return
assert len(x_mod_list) == len(feature_path_list)
assert isinstance(x_mod_list[0], (torch.Tensor, np.ndarray))
# If no save directory is specified, default to '/tmp/adv_mal_cache'
if save_dir is None:
save_dir = os.path.join('/tmp/', 'adv_mal_cache')
if not os.path.exists(save_dir):
utils.mkdir(save_dir)
# Convert the modifications into concrete instructions
x_mod_instructions = [self.inverse_feature.inverse_map_manipulation(x_mod)
for x_mod in x_mod_list]
# Resolve the concrete app paths from the given app directory or list of app paths
if os.path.isdir(app_dir):
app_path_list = [os.path.join(app_dir, os.path.basename(os.path.splitext(feat_p)[0]))
for feat_p in feature_path_list]
elif isinstance(app_dir, list):
app_path_list = app_dir
else:
raise ValueError("Expected an app directory or a list of paths, but got {}.".format(type(app_dir)))
assert np.all([os.path.exists(app_path) for app_path in app_path_list]), "Could not find all of the app paths."
# Prepare the multiprocessing arguments
pargs = [(x_mod_instr, feature_path, app_path, save_dir)
for x_mod_instr, feature_path, app_path in zip(x_mod_instructions,
feature_path_list, app_path_list)
if not os.path.exists(os.path.join(save_dir,
os.path.splitext(os.path.basename(app_path))[0] + '_adv'))]
# Set the number of worker processes, at least 1
cpu_count = multiprocessing.cpu_count() - 2 if multiprocessing.cpu_count() - 2 > 1 else 1
pool = multiprocessing.Pool(cpu_count, initializer=utils.pool_initializer)
# Process in parallel while preserving order
for res in pool.map(InverseDroidFeature.modify_wrapper, pargs):
if isinstance(res, Exception):
logger.exception(res)
pool.close()
pool.join()
def check_lambda(self, model):
"""
Check whether the model has a detection capability and whether information about the adversary is known.
"""
if hasattr(model, 'is_detector_enabled') and (not self.oblivion):
return True
else:
return False
def get_loss(self, model, adv_x, label, lambda_=None):
# If the model has the 'is_detector_enabled' attribute, it is not just a classifier but also has a detector.
if hasattr(model, 'is_detector_enabled'):
logits_f, prob_g = model.forward(adv_x)
else:
# Otherwise, we only get the classification logits from the model
logits_f = model.forward(adv_x)
# print(type(logits_f))
# print("logits_f.shape:", logits_f.shape)
# Compute the cross-entropy loss; reduction='none' means the loss is computed per sample rather than averaged
ce = F.cross_entropy(logits_f, label, reduction='none')
# Get the model's predicted classes
y_pred = logits_f.argmax(1)
# If the model has a detector and 'oblivion' was not chosen (i.e., the adversary's indicator is known)
if hasattr(model, 'is_detector_enabled') and (not self.oblivion):
assert lambda_ is not None
# Get the per-sample tau values
tau = model.get_tau_sample_wise(y_pred)
# Compute a different loss depending on whether we act as the attacker
if self.is_attacker:
loss_no_reduction = ce + lambda_ * (torch.clamp(tau - prob_g, max=self.kappa))
else:
loss_no_reduction = ce + lambda_ * (tau - prob_g)
# Determine which samples were successfully attacked
done = (y_pred != label) & (prob_g <= tau)
else:
# Without a detector, the loss is just the cross entropy
loss_no_reduction = ce
# Determine which samples were successfully attacked
done = y_pred != label
return loss_no_reduction, done
def get_scores(self, model, pertb_x, label, lmda=1.):
# If the model has the 'is_detector_enabled' attribute, it is not just a classifier but also has a detector.
if hasattr(model, 'is_detector_enabled'):
logits_f, prob_g = model.forward(pertb_x)
else:
# Otherwise, we only get the classification logits from the model
logits_f = model.forward(pertb_x)
# Get the model's predicted classes
y_pred = logits_f.argmax(1)
# If the model has a detector and 'oblivion' was not chosen
if hasattr(model, 'is_detector_enabled') and (not self.oblivion):
# Get the per-sample tau values
tau = model.get_tau_sample_wise(y_pred)
# Determine which samples were successfully attacked
done = (y_pred != label) & (prob_g <= tau)
else:
# Determine which samples were successfully attacked
done = y_pred != label
return done
# Path: tools/utils.py
ENC_KEY = 'cab228a122d3486bac7fab148e8b5aba'
MSG = "No such directory or file {} exists!".format(sample_dir)
MSG = "A directory or a list of paths are allowed!"
def pool_initializer():
def retrive_files_set(base_dir, dir_ext, file_ext):
def get_file_name(root_dir, file_ext):
def check_dir(sample_dir):
def dump_joblib(data, path):
def read_joblib(path):
def load_json(json_path):
def dump_json(obj_dict, file_path):
def dump_pickle(data, path, use_gzip=False):
def read_pickle(path, use_gzip=False):
def dump_pickle_frd_space(data, path):
def read_pickle_frd_space(path):
def dump_list_of_lists(data, path):
def read_list_of_lists(path):
def mkdir(target):
def read_txt(path, mode='r'):
def dump_txt(data_str, path, mode='w'):
def read_file_by_fileinput(file_path, inplace=True):
def __init__(self, manager, use_cache=True):
def is_cached(self, key):
def reset(self):
def get(self, key):
def cache(self, key, img, lbl):
def build_kwargs(keys, arg_dict):
def inverse_kwargs(vars):
def save_args(fout, args):
def load_args(fout):
def get_group_args(args, args_parser, title):
def tensor_coo_sp_to_ivs(sparse_tensor):
def ivs_to_tensor_coo_sp(ivs, device='cpu'):
def sp_to_symmetric_sp(sparse_mx):
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
def to_tensor(feature_x=None, labels=None, device='cpu'):
def _to_torch_tensor(mat):
def to_device(feature_x=None, labels=None, device='cpu'):
def psn(x_tensor, prob, lower_value=0., upper_value=1.):
def __init__(self):
def __call__(self, module):
def round_x(x, alpha=0.5):
def get_x0(x, rounding_threshold=0.5, is_sample=False):
def or_tensors(x_1, x_2):
def xor_tensors(x_1, x_2):
def get_mal_data(x_batch, y_batch):
def get_mal_ben_data(x_batch, y_batch):
def java_class_name2smali_name(cls):
def remove_duplicate(components):
def crypt_identifier(idf, seed=2345):
def md5_transform():
def random_string(code):
def sha1_transform():
def string_on_code(code):
def md5_transform():
def random_name(seed=2345, code='abc'):
def apply_encryption(base_string):
def get_sha256(file_path):
class SimplifyClass:
class NonnegWeightConstraint(object):
# Path: config.py
def parser_config():
# Path: core/attack/salt_and_pepper.py
import torch
import numpy as np
from core.attack.base_attack import BaseAttack
from tools import utils
from config import logging, ErrorHandler
logger = logging.getLogger('core.attack.salt_and_pepper')
logger.addHandler(ErrorHandler)
EXP_OVER_FLOW = 1e-120
class Salt_and_pepper(BaseAttack):
def __init__(self, ben_x, oblivion=False, device=None):
super(Salt_and_pepper, self).__init__(oblivion=oblivion, device=device)
self.ben_x = ben_x
def perturb(self, model, x, trials=10, epsilon=10, max_eta=0.001, repetition=10, seed=0, is_apk=False, verbose=False):
# If the input x is empty or has length <= 0, return an empty list
if x is None or len(x) <= 0:
return []
# If self.ben_x has length <= 0, return x directly
if len(self.ben_x) <= 0:
return x
# The trials parameter must not exceed the length of self.ben_x
trials = trials if trials < len(self.ben_x) else len(self.ben_x)
# Get the device the model lives on (e.g., CPU or CUDA)
device = model.device # assume the model has a device attribute
torch.manual_seed(seed) # set the random seed
success_flags = [] # flags recording whether each attempt succeeded
x_mod_list = [] # the modified x from each attempt
# storage for the adversarial examples
| adv_x_list = [] |
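# Illustrative sketch only (not the repository's implementation, which is truncated above
# before the noise-injection loop): salt-and-pepper noise on a binary feature vector randomly
# flips a fraction eta/2 of entries to 1 ("salt") and another eta/2 to 0 ("pepper"),
# keeping the result in {0, 1}.
import torch

def salt_and_pepper_noise(x: torch.Tensor, eta: float, seed: int = 0) -> torch.Tensor:
    g = torch.Generator().manual_seed(seed)
    u = torch.rand(x.shape, generator=g)
    salt = (u < eta / 2).float()
    pepper = (u > 1 - eta / 2).float()
    return torch.clamp(x + salt - pepper, 0.0, 1.0)

print(salt_and_pepper_noise(torch.zeros(2, 8), eta=0.5))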
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: IDEA-XL/InstructMol
# Path: llava/constants.py
IMAGE_TOKEN_INDEX = -200
# Path: llava/constants.py
DEFAULT_IMAGE_TOKEN = "<image>"
# Path: llava/constants.py
DEFAULT_IM_START_TOKEN = "<im_start>"
# Path: llava/constants.py
DEFAULT_IM_END_TOKEN = "<im_end>"
# Path: llava/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
TWO = auto()
MPT = auto()
PLAIN = auto()
LLAMA_2 = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_images(self, return_pil=False):
def expand2square(pil_img, background_color=(122, 116, 104)):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: llava/model/builder.py
def load_pretrained_model(
model_path,
model_base,
model_name,
load_8bit=False,
load_4bit=False,
device_map="auto",
mm_encoder_cfg=None,
**kwargs
):
kwargs.update({"device_map": device_map})
if load_8bit:
kwargs['load_in_8bit'] = True
elif load_4bit:
kwargs['load_in_4bit'] = True
kwargs['quantization_config'] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type='nf4'
)
else:
kwargs['torch_dtype'] = torch.float16
if 'llava' in model_name.lower():
# Load LLaVA model
if 'lora' in model_name.lower() and model_base is not None:
lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
if mm_encoder_cfg is not None:
lora_cfg_pretrained = update_pretrained_config(lora_cfg_pretrained, mm_encoder_cfg)
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
print('Loading LLaVA from base model...')
# currently changed to LlavaGraphLlamaForCausalLM
model = LlavaGraphLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
# model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
if model.lm_head.weight.shape[0] != token_num:
model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
print('Loading additional LLaVA weights...')
if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
else:
# this is probably from HF Hub
from huggingface_hub import hf_hub_download
def load_from_hf(repo_id, filename, subfolder=None):
cache_file = hf_hub_download(
repo_id=repo_id,
filename=filename,
subfolder=subfolder)
return torch.load(cache_file, map_location='cpu')
non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
if any(k.startswith('model.model.') for k in non_lora_trainables):
non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
model.load_state_dict(non_lora_trainables, strict=False)
from peft import PeftModel
print('Loading LoRA weights...')
model = PeftModel.from_pretrained(model, model_path)
print('Merging LoRA weights...')
model = model.merge_and_unload()
print('Model is loaded...')
elif model_base is not None:
# this may be mm projector only
print('Loading LLaVA from base model...')
if 'mpt' in model_name.lower():
if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):
shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
cfg_pretrained = AutoConfig.from_pretrained(model_path)
if mm_encoder_cfg is not None:
cfg_pretrained = update_pretrained_config(cfg_pretrained, mm_encoder_cfg)
# currently changed to LlavaGraphLlamaForCausalLM
model = LlavaGraphLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
model.load_state_dict(mm_projector_weights, strict=False)
else:
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
else:
# Load language model
if model_base is not None:
# PEFT model
from peft import PeftModel
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
print(f"Loading LoRA weights from {model_path}")
model = PeftModel.from_pretrained(model, model_path)
print(f"Merging weights")
model = model.merge_and_unload()
print('Convert to FP16...')
model.to(torch.float16)
else:
use_fast = False
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
image_processor = None
if 'llava' in model_name.lower():
mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
if mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
if mm_use_im_start_end:
tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
model.resize_token_embeddings(len(tokenizer))
if hasattr(model, 'get_vision_tower'):
vision_tower = model.get_vision_tower()
if not vision_tower.is_loaded:
vision_tower.load_model()
vision_tower.to(device='cuda', dtype=torch.float16)
image_processor = vision_tower.image_processor
if hasattr(model.config, "max_sequence_length"):
context_len = model.config.max_sequence_length
else:
context_len = 2048
return tokenizer, model, image_processor, context_len
# Path: llava/utils.py
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
import torch
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
# Path: llava/mm_utils.py
def tokenizer_image_token(prompt:str, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
# Path: llava/mm_utils.py
def get_model_name_from_path(model_path):
model_path = model_path.strip("/")
model_paths = model_path.split("/")
if model_paths[-1].startswith('checkpoint-'):
return model_paths[-2] + "_" + model_paths[-1]
else:
return model_paths[-1]
# Path: llava/mm_utils.py
class KeywordsStoppingCriteria(StoppingCriteria):
def __init__(self, keywords, tokenizer, input_ids):
self.keywords = keywords
self.keyword_ids = []
for keyword in keywords:
cur_keyword_ids = tokenizer(keyword).input_ids
if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
cur_keyword_ids = cur_keyword_ids[1:]
self.keyword_ids.append(torch.tensor(cur_keyword_ids))
self.tokenizer = tokenizer
self.start_len = input_ids.shape[1]
def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
bs = output_ids.shape[0]
if bs == 1:
offset = min(output_ids.shape[1] - self.start_len, 3)
self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids] # cast to device
for keyword_id in self.keyword_ids:
if output_ids[0, -keyword_id.shape[0]:] == keyword_id:
return True
outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
for keyword in self.keywords:
if keyword in outputs:
return True
return False
else:
raise NotImplementedError("Only support batch size 1 (yet)")
# Path: llava/mm_utils.py
class MM_ENCODER_CFG(BaseModel):
gin_num_layers: int = 5
gin_hidden_dim: int = 300
drop_ratio: float = 0.1
init_checkpoint: str = None
graph_pooling: str = 'mean'
# Path: llava/mol_utils.py
def check_smiles_validity(smiles:str)->bool:
# check if valid smiles
m = Chem.MolFromSmiles(smiles,sanitize=False)
if m is None:
return False
return True
# Path: llava/datasets/smiles2graph.py
def smiles2graph(smiles_string)->Dict:
"""
Converts SMILES string to graph Data object
:input: SMILES string (str)
:return: graph object
"""
mol = Chem.MolFromSmiles(smiles_string)
# atoms
atom_features_list = []
for atom in mol.GetAtoms():
atom_features_list.append(atom_to_feature(atom))
x = np.array(atom_features_list, dtype = np.int64)
# bonds
num_bond_features = 2
if len(mol.GetBonds()) > 0: # mol has bonds
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
edge_feature = bond_to_feature(bond)
# add edges in both directions
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# data.edge_index: Graph connectivity in COO format with shape [2, num_edges]
edge_index = np.array(edges_list, dtype = np.int64).T
# data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]
edge_attr = np.array(edge_features_list, dtype = np.int64)
else: # mol has no bonds
edge_index = np.empty((2, 0), dtype = np.int64)
edge_attr = np.empty((0, num_bond_features), dtype = np.int64)
graph = dict()
graph['edge_index'] = edge_index
graph['edge_feat'] = edge_attr
graph['node_feat'] = x
graph['num_nodes'] = len(x)
return graph
# Path: llava/serve/cli_graph.py
import argparse
import torch
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria, MM_ENCODER_CFG
from llava.mol_utils import check_smiles_validity
from llava.datasets.smiles2graph import smiles2graph
from typing import Dict
from transformers import TextStreamer
from torch_geometric.data import Data
def _convert_dict_to_Data(data_dict: Dict) -> Data:
return Data(
x=torch.asarray(data_dict['node_feat']),
edge_attr=torch.asarray(data_dict['edge_feat']),
edge_index=torch.asarray(data_dict['edge_index']),
)
def main(args):
# device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path)
# graph encoder config
mm_encoder_cfg = MM_ENCODER_CFG(init_checkpoint=args.graph_checkpoint_path)
mm_encoder_cfg = mm_encoder_cfg.dict()
# load model
tokenizer, model, _, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, mm_encoder_cfg=mm_encoder_cfg)
if 'llama-2' in model_name.lower():
conv_mode = "llava_llama_2"
elif "v1" in model_name.lower():
conv_mode = "llava_v1"
elif "mpt" in model_name.lower():
| conv_mode = "mpt" |
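The template selection above is a plain substring dispatch on the model name, and branch order matters (a name containing both "llama-2" and "v1" resolves to the Llama-2 template). A small self-contained sketch of that dispatch follows; the fallback value is an assumption, since the excerpt cuts off before the default branch.

```python
def pick_conv_mode(model_name: str) -> str:
    # Substring dispatch mirroring the branches shown above; the final
    # fallback value is an assumption, not taken from the repository.
    name = model_name.lower()
    if "llama-2" in name:
        return "llava_llama_2"
    if "v1" in name:
        return "llava_v1"
    if "mpt" in name:
        return "mpt"
    return "llava_v0"  # assumed default for this sketch

assert pick_conv_mode("llava-v1.5-13b") == "llava_v1"
assert pick_conv_mode("llava-llama-2-7b-chat-v1") == "llava_llama_2"  # order matters
```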
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: iann838/pulsefire
# Path: pulsefire/caches.py
class MemoryCache(BaseCache):
"""Memory Cache.
This cache lives in-memory, be aware of memory footprint when caching large responses.
Example:
```python
MemoryCache()
```
"""
cache: dict[str, tuple[Any, float]]
def __init__(self) -> None:
self.cache = {}
self.last_expired = time.time()
async def get[T](self, key: str) -> T:
value, expire = self.cache[key]
if time.time() > expire:
self.cache.pop(key, None)
raise KeyError(key)
return value
async def set(self, key: str, value: Any, ttl: float) -> None:
if ttl <= 0:
return
self.cache[key] = [value, time.time() + ttl]
if time.time() - self.last_expired > 60:
self.last_expired = time.time()
for old_key, (_, expire) in self.cache.items():
if time.time() > expire:
self.cache.pop(old_key, None)
async def clear(self) -> None:
self.cache.clear()
# Path: pulsefire/caches.py
class DiskCache(BaseCache):
"""Disk Cache.
Requires `diskcache` installed. This cache lives on disk, meaning cheaper storage but slower access.
Example:
```python
DiskCache() # Cache on tmp
DiskCache("folder") # Cache on folder/
```
Parameters:
directory: Cache directory, uses tmp if None.
shards: Number of shards to distribute writes.
serializer: Serializer package supporting `loads` and `dumps`.
"""
def __init__(self, directory: str | None = None, shards: int = 8, serializer=pickle) -> None:
import diskcache
self.directory = directory
self.serializer = serializer
self.cache = diskcache.FanoutCache(directory, shards)
@sync_to_async()
def get[T](self, key: str) -> T:
value = self.cache.get(key)
if value is None:
raise KeyError(key)
return self.serializer.loads(value)
@sync_to_async()
def set(self, key: str, value: Any, ttl: float) -> None:
if ttl <= 0:
return
if math.isinf(ttl):
ttl = None
self.cache.set(key, self.serializer.dumps(value), ttl)
@sync_to_async()
def clear(self) -> None:
self.cache.clear()
# Path: pulsefire/clients.py
class CDragonClient(BaseClient):
"""Community Dragon Client.
| Resources | Support |
| -------------------- | -------------------------- |
| League of Legends | ✅ |
| Legends of Runeterra | ❎ Use DDragon instead. |
| Teamfight Tactics | ✅ |
| Valorant | ❎ |
Example:
```python
async with CDragonClient(
default_params={"patch": "latest", "locale": "default"}
) as client:
champion = await client.get_lol_v1_champion(id=777)
assert champion["name"] == "Yone"
```
"""
Patch = Literal["latest", "pbe"] | _str
Locale = Literal[
"default", "ar_ae", "cs_cz", "de_de", "el_gr", "en_au", "en_gb", "en_ph", "en_sg", "en_us",
"es_ar", "es_es", "es_mx", "fr_fr", "hu_hu", "it_it", "ja_jp", "ko_kr", "pl_pl", "pt_br",
"ro_ro", "ru_ru", "th_th", "tr_tr", "vi_vn", "vn_vn", "zh_cn", "zh_my", "zh_tw",
] | _str
def __init__(
self,
*,
base_url: str = "https://raw.communitydragon.org",
default_params: dict[str, Any] = {"patch": ..., "locale": ...},
default_headers: dict[str, str] = {},
default_queries: dict[str, str] = {},
middlewares: list[Middleware] = [
json_response_middleware(),
http_error_middleware(),
],
) -> None:
super().__init__(
base_url=base_url,
default_params=default_params,
default_headers=default_headers,
default_queries=default_queries,
middlewares=middlewares
)
async def get_lol_champion_bin(self, *, patch: Patch = ..., key_lower: str = ...) -> dict[str, CDragonSchema.LolChampionBinValue]:
return await self.invoke("GET", "/{patch}/game/data/characters/{key_lower}/{key_lower}.bin.json")
async def get_lol_v1_champion(self, *, patch: Patch = ..., locale: Locale = ..., id: int = ...) -> CDragonSchema.LolV1Champion:
return await self.invoke("GET", "/{patch}/plugins/rcp-be-lol-game-data/global/{locale}/v1/champions/{id}.json")
async def get_lol_v1_champion_summary(self, *, patch: Patch = ..., locale: Locale = ...) -> list[CDragonSchema.LolV1ChampionInfo]:
return await self.invoke("GET", "/{patch}/plugins/rcp-be-lol-game-data/global/{locale}/v1/champion-summary.json")
async def get_lol_v1_items(self, *, patch: Patch = ..., locale: Locale = ...) -> list[CDragonSchema.LolV1Item]:
return await self.invoke("GET", "/{patch}/plugins/rcp-be-lol-game-data/global/{locale}/v1/items.json")
async def get_lol_v1_perks(self, *, patch: Patch = ..., locale: Locale = ...) -> list[CDragonSchema.LolV1Perk]:
return await self.invoke("GET", "/{patch}/plugins/rcp-be-lol-game-data/global/{locale}/v1/perks.json")
async def get_lol_v1_summoner_spells(self, *, patch: Patch = ..., locale: Locale = ...) -> list[CDragonSchema.LolV1SummonerSpell]:
return await self.invoke("GET", "/{patch}/plugins/rcp-be-lol-game-data/global/{locale}/v1/summoner-spells.json")
async def get_lol_v1_profile_icons(self, *, patch: Patch = ..., locale: Locale = ...) -> list[CDragonSchema.LolV1ProfileIcon]:
return await self.invoke("GET", "/{patch}/plugins/rcp-be-lol-game-data/global/{locale}/v1/profile-icons.json")
async def get_tft_data(self, *, patch: Patch = ..., locale: Locale = ...) -> CDragonSchema.TftData:
return await self.invoke("GET", "/{patch}/cdragon/tft/{locale}.json")
# Path: pulsefire/functools.py
def async_to_sync(runner: Callable[[Awaitable[Any]], Any] = asyncio.run):
"""Convert a coroutine function to run synchronously. Use as decorator `@async_to_sync()`.
Example:
```python
@async_to_sync()
async def sample_func(number: int):
...
sample_func(0)
```
Parameters:
runner: A callable that runs the awaitable synchronously.
Raises:
TypeError: When `func` is not a coroutine function.
"""
def decorator[**P, R](func: Callable[P, Awaitable[R]]) -> Callable[P, R]:
if not inspect.iscoroutinefunction(func):
raise TypeError(f"{func} is not a coroutine function")
@functools.wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
return runner(func(*args, **kwargs))
return wrapper
return decorator
# Path: pulsefire/middlewares.py
def cache_middleware(cache: BaseCache, rules: list[tuple[Callable[[Invocation], bool], float]]):
"""Cache middleware.
Recommended to be placed before response deserialization middlewares.
Example:
```python
cache = MemoryCache()
cache_middleware(cache, [
(lambda inv: inv.invoker.__name__ == "get_lol_v1_champion", 3600),
(lambda inv: inv.invoker.__name__ ..., float("inf")), # cache indefinitely.
(lambda inv: inv.url ..., 3600),
(lambda inv: inv.params ..., 3600),
])
```
Parameters:
cache: Cache instance.
rules: Cache rules, defined by a list of (condition, ttl).
"""
rules.append((lambda _: True, 0)) # Add default
def constructor(next: MiddlewareCallable):
async def middleware(invocation: Invocation):
key = f"{invocation.method} {invocation.url}"
for cond, ttl in rules:
if not cond(invocation):
continue
try:
value = await cache.get(key)
except KeyError:
value = await next(invocation)
await cache.set(key, value, ttl)
return value
raise RuntimeError("rules out of range")
return middleware
return constructor
# Path: pulsefire/middlewares.py
def http_error_middleware(max_retries: int = 3):
"""HTTP error middleware.
Should be positioned as late as possible and before rate limiter middlewares
(if any) in the client middlewares list.
Responses are handled differently based on their HTTP status:
| Status | Measures |
| ------ | ------------------------------------- |
| 2XX | Return response. |
| 3XX | Raise `aiohttp.ClientResponseError`. |
| 4XX | Raise `aiohttp.ClientResponseError`. |
| 429 | Exponential retries (2^n). |
| 5XX | Exponential retries (2^n). |
Example:
```python
http_error_middleware(3)
```
Parameters:
max_retries: Number of retries to perform before giving up.
Raises:
aiohttp.ClientResponseError: When retries have exhausted.
"""
def constructor(next: MiddlewareCallable):
async def middleware(invocation: Invocation):
last_response: aiohttp.ClientResponse = None
for attempt in range(max_retries + 1):
if attempt:
await asyncio.sleep(2 ** attempt)
response: aiohttp.ClientResponse = await next(invocation)
last_response = response
if 300 > response.status >= 200:
return response
if not (response.status == 429 or response.status >= 500):
response.raise_for_status()
else:
last_response.raise_for_status()
return middleware
return constructor
# Path: pulsefire/middlewares.py
def json_response_middleware(loads: Callable[[str | bytes | bytearray], Any] = json.loads):
"""JSON response middleware.
Attempts to deserialize JSON responses regardless of content type,
if an exception is raised during deserialization, bytes are returned instead.
Example:
```python
# Use orjson loads for 3~10x faster deserialization
import orjson
json_response_middleware(orjson.loads)
```
Parameters:
loads: JSON decoder to be used on deserialization.
"""
def constructor(next: MiddlewareCallable):
async def middleware(invocation: Invocation):
response: aiohttp.ClientResponse = await next(invocation)
try:
return await response.json(encoding="utf-8", content_type=None, loads=loads)
except Exception:
return await response.read()
return middleware
return constructor
# Path: tests/test_caches.py
from contextlib import contextmanager
from pulsefire.caches import (
MemoryCache,
DiskCache
)
from pulsefire.clients import CDragonClient
from pulsefire.functools import async_to_sync
from pulsefire.middlewares import (
cache_middleware,
http_error_middleware,
json_response_middleware,
)
import asyncio
import time
@contextmanager
def timer():
t1 = t2 = time.perf_counter()
yield lambda: t2 - t1
t2 = time.perf_counter()
@async_to_sync()
async def test_memory_cache():
cache = MemoryCache()
async with CDragonClient(
default_params={"patch": "latest", "locale": "default"},
middlewares=[
cache_middleware(cache, [
(lambda inv: inv.invoker.__name__ == "get_lol_v1_champion", 10),
(lambda inv: inv.invoker.__name__ == "get_lol_v1_items", 200),
(lambda inv: inv.invoker.__name__ == "get_lol_v1_summoner_spells", float("inf")),
]),
json_response_middleware(),
http_error_middleware(),
]
) as client:
with timer() as get_t:
await client.get_lol_v1_items()
assert get_t() > 0.02
with timer() as get_t:
await client.get_lol_v1_items()
assert get_t() < 0.002
with timer() as get_t:
await client.get_lol_v1_summoner_spells()
assert get_t() > 0.02
with timer() as get_t:
await client.get_lol_v1_summoner_spells()
assert get_t() < 0.002
with timer() as get_t:
await client.get_lol_v1_summoner_spells()
assert get_t() < 0.002
with timer() as get_t:
await client.get_lol_v1_champion(id=777)
assert get_t() > 0.02
with timer() as get_t:
await client.get_lol_v1_champion(id=777)
assert get_t() < 0.002
await asyncio.sleep(10)
with timer() as get_t:
await client.get_lol_v1_champion(id=777)
assert get_t() > 0.02
@async_to_sync()
async def test_disk_cache():
cache = DiskCache("tests/__pycache__/diskcache")
await cache.clear()
async with CDragonClient(
default_params={"patch": "latest", "locale": "default"},
middlewares=[
cache_middleware(cache, [
(lambda inv: inv.invoker.__name__ == "get_lol_v1_champion", 10),
(lambda inv: inv.invoker.__name__ == "get_lol_v1_items", 200),
(lambda inv: inv.invoker.__name__ == "get_lol_v1_summoner_spells", float("inf")),
]),
json_response_middleware(),
http_error_middleware(),
]
) as client:
with timer() as get_t:
await client.get_lol_v1_items()
assert get_t() > 0.02
with timer() as get_t:
await client.get_lol_v1_items()
assert get_t() < 0.015
with timer() as get_t:
await client.get_lol_v1_summoner_spells()
assert get_t() > 0.02
with timer() as get_t:
await client.get_lol_v1_summoner_spells()
assert get_t() < 0.002
with timer() as get_t:
await client.get_lol_v1_summoner_spells()
assert get_t() < 0.002
with timer() as get_t:
await client.get_lol_v1_champion(id=777)
assert get_t() > 0.02
with timer() as get_t:
| await client.get_lol_v1_champion(id=777) |
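The timing assertions above rely on two ideas: responses are cached under a TTL chosen by the first matching `(condition, ttl)` rule, and a cached entry stops being served once its TTL has elapsed. Below is a minimal, dependency-free sketch of both ideas; it mirrors the concept only and is not the repository's actual middleware or cache classes.

```python
import time

class TinyTTLCache:
    # Minimal in-memory TTL cache, for illustration only.
    def __init__(self):
        self._store = {}  # key -> (value, expires_at)

    def get(self, key):
        value, expires_at = self._store[key]      # raises KeyError if absent
        if time.time() > expires_at:
            del self._store[key]
            raise KeyError(key)
        return value

    def set(self, key, value, ttl):
        if ttl > 0:
            self._store[key] = (value, time.time() + ttl)

def resolve_ttl(rules, invocation):
    # Return the TTL of the first rule whose condition matches, else 0 (no caching).
    for condition, ttl in rules:
        if condition(invocation):
            return ttl
    return 0

rules = [
    (lambda inv: inv["name"] == "get_lol_v1_champion", 10),
    (lambda inv: inv["name"] == "get_lol_v1_items", 200),
]
cache = TinyTTLCache()
invocation = {"name": "get_lol_v1_items"}
cache.set("GET /items", {"id": 1}, resolve_ttl(rules, invocation))
print(cache.get("GET /items"))  # served from cache until the 200 s TTL expires
```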
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Vali-98/XTTS-RVC-UI
# Path: infer_pack/modules.py
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
class ConvReluNorm(nn.Module):
class DDSConv(nn.Module):
class WN(torch.nn.Module):
class ResBlock1(torch.nn.Module):
class ResBlock2(torch.nn.Module):
class Log(nn.Module):
class Flip(nn.Module):
class ElementwiseAffine(nn.Module):
class ResidualCouplingLayer(nn.Module):
class ConvFlow(nn.Module):
def __init__(self, channels, eps=1e-5):
def forward(self, x):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
def forward(self, x, x_mask):
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
def forward(self, x, x_mask, g=None):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
def forward(self, x, x_mask, g=None, **kwargs):
def remove_weight_norm(self):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
def forward(self, x, x_mask=None):
def remove_weight_norm(self):
def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
def forward(self, x, x_mask=None):
def remove_weight_norm(self):
def forward(self, x, x_mask, reverse=False, **kwargs):
def forward(self, x, *args, reverse=False, **kwargs):
def __init__(self, channels):
def forward(self, x, x_mask, reverse=False, **kwargs):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False,
):
def forward(self, x, x_mask, g=None, reverse=False):
def remove_weight_norm(self):
def __init__(
self,
in_channels,
filter_channels,
kernel_size,
n_layers,
num_bins=10,
tail_bound=5.0,
):
def forward(self, x, x_mask, g=None, reverse=False):
# Path: infer_pack/attentions.py
class Encoder(nn.Module):
class Decoder(nn.Module):
class MultiHeadAttention(nn.Module):
class FFN(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=10,
**kwargs
):
def forward(self, x, x_mask):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
proximal_bias=False,
proximal_init=True,
**kwargs
):
def forward(self, x, x_mask, h, h_mask):
def __init__(
self,
channels,
out_channels,
n_heads,
p_dropout=0.0,
window_size=None,
heads_share=True,
block_length=None,
proximal_bias=False,
proximal_init=False,
):
def forward(self, x, c, attn_mask=None):
def attention(self, query, key, value, mask=None):
def _matmul_with_relative_values(self, x, y):
def _matmul_with_relative_keys(self, x, y):
def _get_relative_embeddings(self, relative_embeddings, length):
def _relative_position_to_absolute_position(self, x):
def _absolute_position_to_relative_position(self, x):
def _attention_bias_proximal(self, length):
def __init__(
self,
in_channels,
out_channels,
filter_channels,
kernel_size,
p_dropout=0.0,
activation=None,
causal=False,
):
def forward(self, x, x_mask):
def _causal_padding(self, x):
def _same_padding(self, x):
# Path: infer_pack/commons.py
def init_weights(m, mean=0.0, std=0.01):
def get_padding(kernel_size, dilation=1):
def convert_pad_shape(pad_shape):
def kl_divergence(m_p, logs_p, m_q, logs_q):
def rand_gumbel(shape):
def rand_gumbel_like(x):
def slice_segments(x, ids_str, segment_size=4):
def slice_segments2(x, ids_str, segment_size=4):
def rand_slice_segments(x, x_lengths=None, segment_size=4):
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
def subsequent_mask(length):
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
def convert_pad_shape(pad_shape):
def shift_1d(x):
def sequence_mask(length, max_length=None):
def generate_path(duration, mask):
def clip_grad_value_(parameters, clip_value, norm_type=2):
# Path: infer_pack/commons.py
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
# Path: infer_pack/commons.py
def get_padding(kernel_size, dilation=1):
return int((kernel_size * dilation - dilation) / 2)
# Path: infer_pack/commons.py
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
# Path: infer_pack/commons.py
def init_weights(m, mean=0.0, std=0.01):
def get_padding(kernel_size, dilation=1):
def convert_pad_shape(pad_shape):
def kl_divergence(m_p, logs_p, m_q, logs_q):
def rand_gumbel(shape):
def rand_gumbel_like(x):
def slice_segments(x, ids_str, segment_size=4):
def slice_segments2(x, ids_str, segment_size=4):
def rand_slice_segments(x, x_lengths=None, segment_size=4):
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
def subsequent_mask(length):
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
def convert_pad_shape(pad_shape):
def shift_1d(x):
def sequence_mask(length, max_length=None):
def generate_path(duration, mask):
def clip_grad_value_(parameters, clip_value, norm_type=2):
# Path: infer_pack/models.py
import math, pdb, os
import torch
import numpy as np
from time import time as ttime
from torch import nn
from torch.nn import functional as F
from infer_pack import modules
from infer_pack import attentions
from infer_pack import commons
from infer_pack.commons import init_weights, get_padding
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from infer_pack.commons import init_weights
from infer_pack import commons
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.n_flows = n_flows
self.gin_channels = gin_channels
self.flows = nn.ModuleList()
for i in range(n_flows):
self.flows.append(
modules.ResidualCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
def forward(self, x, x_mask, g=None, reverse=False):
if not reverse:
for flow in self.flows:
x, _ = flow(x, x_mask, g=g, reverse=reverse)
else:
for flow in reversed(self.flows):
x = flow(x, x_mask, g=g, reverse=reverse)
return x
def remove_weight_norm(self):
for i in range(self.n_flows):
self.flows[i * 2].remove_weight_norm()
class PosteriorEncoder(nn.Module):
def __init__(
self,
in_channels,
out_channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
self.enc = modules.WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths, g=None):
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
x.dtype
)
x = self.pre(x) * x_mask
x = self.enc(x, x_mask, g=g)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
return z, m, logs, x_mask
def remove_weight_norm(self):
self.enc.remove_weight_norm()
class Generator(torch.nn.Module):
def __init__(
self,
initial_channel,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=0,
):
super(Generator, self).__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = Conv1d(
initial_channel, upsample_initial_channel, 7, 1, padding=3
)
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(
weight_norm(
ConvTranspose1d(
upsample_initial_channel // (2**i),
upsample_initial_channel // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(
zip(resblock_kernel_sizes, resblock_dilation_sizes)
):
| self.resblocks.append(resblock(ch, k, d)) |
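In the `Generator` constructor above, each upsampling stage halves the channel width (`upsample_initial_channel // (2 ** (i + 1))`) and is followed by one residual block per kernel size. The short sketch below just prints the schedule implied by a typical HiFi-GAN-style configuration; the concrete numbers are assumed for illustration rather than read from this repository's configs.

```python
# Assumed example configuration (not taken from the repository's configs).
upsample_initial_channel = 512
upsample_rates = [8, 8, 2, 2]
resblock_kernel_sizes = [3, 7, 11]

channels = [upsample_initial_channel // (2 ** (i + 1))
            for i in range(len(upsample_rates))]
print(channels)  # [256, 128, 64, 32] -- per-stage output widths
print(len(upsample_rates) * len(resblock_kernel_sizes))  # 12 residual blocks in total
```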
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: facebookresearch/SOC-matching
# Path: SOC_matching/gsbm_lib.py
class EndPointGaussianPath(torch.nn.Module):
def __init__(self, mean, sigma, gamma, basedrift=None):
super(EndPointGaussianPath, self).__init__()
print(f"mean.B: {mean.B}, gamma.B: {gamma.B}")
assert mean.B == gamma.B
self.B = mean.B
self.T = mean.T
self.S = gamma.T
self.D = mean.D
self.mean = mean # t: (T,) --> (B, T, D)
self.sigma = sigma
self.gamma = gamma # t: (T,) --> (B, T)
self.basedrift = basedrift # xt: (*, T, D), t: (T,) --> (*, T, D)
@property
def device(self):
return self.parameters().__next__().device
def sample_xt(self, t, N):
"""marginal
t: (T,) --> xt: (B, N, T, D)
"""
mean_t = self.mean(t) # (B, T, D)
B, T, D = mean_t.shape
assert t.shape == (T,)
std_t = self.gamma(t).reshape(B, 1, T, 1) # (B, 1, T, 1)
noise = torch.randn(B, N, T, D, device=t.device) # (B, N, T, D)
xt = mean_t.unsqueeze(1) + std_t * noise
assert xt.shape == noise.shape
return xt
def ft(self, t, xt, direction):
"""
t: (T,)
xt: (B, N, T, D)
===
ft: (B, N, T, D)
"""
B, N, T, D = xt.shape
assert t.shape == (T,)
if self.basedrift == None:
return torch.zeros_like(xt)
sign = 1.0 if direction == "fwd" else -1
ft = self.basedrift(
xt.reshape(B * N, T, D),
t,
).reshape(B, N, T, D)
return sign * ft
# @profile
def ut(self, t, xt, direction, create_graph_jvp=None, verbose=False):
"""
t: (T,)
xt: (B, N, T, D)
===
ut: (B, N, T, D)
"""
assert (t > 0).all() and (t < 1).all()
B, N, T, D = xt.shape
if verbose:
print(f"xt.shape: {xt.shape}")
assert t.shape == (T,)
create_graph = create_graph_jvp or self.training
mean, dmean = torch.autograd.functional.jvp(
self.mean, t, torch.ones_like(t), create_graph=create_graph
)
if verbose:
print(f"mean.shape: {mean.shape}, dmean.shape: {dmean.shape}")
assert mean.shape == dmean.shape == (B, T, D)
dmean = dmean.reshape(B, 1, T, D)
mean = mean.reshape(B, 1, T, D)
std, dstd = torch.autograd.functional.jvp(
self.gamma, t, torch.ones_like(t).to(t.device), create_graph=create_graph
)
assert std.shape == dstd.shape == (B, T)
if direction == "fwd":
R_inverse = torch.matmul(self.sigma, torch.transpose(self.sigma, 0, 1))
R_inverse_reshape = R_inverse.unsqueeze(0).unsqueeze(0).repeat(B, T, 1, 1)
dstd_reshape = torch.diag_embed(dstd.unsqueeze(2).repeat(1, 1, D))
inverse_std_reshape = torch.diag_embed(
(1 / std).unsqueeze(2).repeat(1, 1, D)
)
a = torch.einsum(
"...ij,...jk->...ik",
(
dstd_reshape
- torch.einsum(
"...ij,...jk->...ik",
R_inverse_reshape,
0.5 * inverse_std_reshape,
)
),
inverse_std_reshape,
)
drift_t = dmean + torch.einsum(
"...ij,...j->...i", a.reshape(B, 1, T, D, D), xt - mean
)
else:
R_inverse = torch.matmul(self.sigma, torch.transpose(self.sigma, 0, 1))
R_inverse_reshape = R_inverse.unsqueeze(0).unsqueeze(0).repeat(B, T, 1, 1)
dstd_reshape = torch.diag_embed(dstd.unsqueeze(2).repeat(1, 1, D))
inverse_std_reshape = torch.diag_embed(
(1 / std).unsqueeze(2).repeat(1, 1, D)
)
a = torch.einsum(
"...ij,...jk->...ik",
(
-dstd_reshape
- torch.einsum(
"...ij,...jk->...ik",
R_inverse_reshape,
0.5 * inverse_std_reshape,
)
),
inverse_std_reshape,
)
drift_t = -dmean + torch.einsum(
"...ij,...j->...i", a.reshape(B, 1, T, D, D), xt - mean
)
ft = self.ft(t, xt, direction)
assert drift_t.shape == ft.shape == xt.shape
return drift_t - ft
def ut_zeros(self, t, xt, direction, create_graph_jvp=None, verbose=False):
return torch.zeros_like(xt).to(xt.device)
def drift(self, x, t, N, direction):
"""
x: (B*N, D)
t: (B*N,)
===
ut: (B*N, D)
"""
assert torch.allclose(t, t[0] * torch.ones_like(t))
BN, D = x.shape
assert BN % N == 0
B = BN // N
_t = t[0].reshape(1)
_x = x.reshape(B, N, 1, D)
u = self.ut(_t, _x, direction)
assert u.shape == _x.shape
return u.reshape(B * N, D)
def forward(self, t, N, direction):
"""
t: (T,)
===
xt: (B, N, T, D)
ut: (B, N, T, D)
"""
xt = self.sample_xt(t, N)
B, N, T, D = xt.shape
assert t.shape == (T,)
ut = self.ut(t, xt, direction)
assert ut.shape == xt.shape
return xt, ut
# Path: SOC_matching/gsbm_lib.py
class GammaSpline(torch.nn.Module):
def __init__(self, t, xt, sigma, fix_init=False, init_knots=1):
"""
t: (T,)
xt: (B, T, 1)
"""
super(GammaSpline, self).__init__()
B, T, D = xt.shape
assert t.shape == (T,) and D == 1
self.T = T
self.B = B
self.sigma = sigma
self.spline = EndPointSpline(t, xt, fix_init=fix_init, init_knots=init_knots)
self.softplus = torch.nn.Softplus()
self.sigmoid = torch.nn.Sigmoid()
self.scale = 1.0 / self.softplus(torch.tensor([1.0]))
@property
def t(self):
return self.spline.t
@property
def xt(self):
return self.spline.xt
@property
def device(self):
return self.spline.device
def forward(self, t):
base_gamma = brownian_motion_std(t, self.sigma)
xt = self.spline(t).squeeze(-1)
gamma = self.scale.to(xt.device) * base_gamma * self.softplus(xt)
return gamma
# Path: SOC_matching/gsbm_lib.py
def init_spline(x0, x1, n_knots):
T, (B, D) = n_knots, x0.shape
assert x1.shape == (B, D)
t = torch.linspace(0, 1, T, device=x0.device)
tt = t.reshape(1, T, 1)
xt = (1 - tt) * x0.reshape(B, 1, D) + tt * x1.reshape(B, 1, D)
assert t.shape == (T,)
assert xt.shape == (B, T, D)
return MeanSpline(t, xt)
# Path: SOC_matching/utils.py
import numpy as np
import torch
import pickle
import os
import copy
from tqdm.notebook import trange
from omegaconf import OmegaConf
from SOC_matching.gsbm_lib import EndPointGaussianPath, GammaSpline, init_spline
(Phi_values_before_update)
/ (Phi_values_before_update - Phi_values_after_update + 1e-6)
+ 1e-6
)
x0 = (
just_stopped.unsqueeze(1)
* (
x0_before
+ step_fraction.unsqueeze(1) * stop_inds.unsqueeze(1) * update
)
+ (1 - just_stopped.unsqueeze(1)) * x0
)
fractional_timestep = (
just_stopped * step_fraction**2 * dt + not_stopped * dt
)
fractional_timesteps.append(fractional_timestep)
stop_inds = sde.Phi(x0) > 0
stop_indicators.append(stop_inds)
else:
fractional_timesteps.append(dt * torch.ones(x0.shape[0]).to(x0.device))
stop_indicators.append(torch.ones(x0.shape[0]).to(x0.device))
xt.append(x0)
controls.append(u0)
if stopping_condition:
log_path_weight_deterministic = (
log_path_weight_deterministic
+ fractional_timestep
/ lmbd
* (-sde.f(t0, x0) - 0.5 * torch.sum(u0**2, dim=1))
)
log_path_weight_stochastic = log_path_weight_stochastic + torch.sqrt(
fractional_timestep / lmbd
) * (-torch.sum(u0 * noise, dim=1))
else:
log_path_weight_deterministic = (
log_path_weight_deterministic
+ dt / lmbd * (-sde.f(t0, x0) - 0.5 * torch.sum(u0**2, dim=1))
)
log_path_weight_stochastic = log_path_weight_stochastic + torch.sqrt(
dt / lmbd
) * (-torch.sum(u0 * noise, dim=1))
log_terminal_weight = -sde.g(x0) / lmbd
if detach:
return (
torch.stack(xt).detach(),
torch.stack(noises).detach(),
torch.stack(stop_indicators).detach(),
torch.stack(fractional_timesteps).detach()
if len(fractional_timesteps) > 0
else None,
log_path_weight_deterministic.detach(),
log_path_weight_stochastic.detach(),
log_terminal_weight.detach(),
torch.stack(controls).detach(),
)
else:
return (
torch.stack(xt),
torch.stack(noises),
torch.stack(stop_indicators),
torch.stack(fractional_timesteps).detach()
if len(fractional_timesteps) > 0
else None,
log_path_weight_deterministic,
log_path_weight_stochastic,
log_terminal_weight,
torch.stack(controls),
)
def control_objective(
sde, x0, ts, lmbd, batch_size, total_n_samples=65536, verbose=False
):
n_batches = int(total_n_samples // batch_size)
effective_n_samples = n_batches * batch_size
for k in range(n_batches):
state0 = x0.repeat(batch_size, 1)
(
_,
_,
_,
_,
log_path_weight_deterministic,
_,
log_terminal_weight,
_,
) = stochastic_trajectories(
sde,
state0,
ts.to(state0),
lmbd,
verbose=verbose,
)
if k == 0:
ctrl_losses = -lmbd * (log_path_weight_deterministic + log_terminal_weight)
else:
ctrl_loss = -lmbd * (log_path_weight_deterministic + log_terminal_weight)
ctrl_losses = torch.cat((ctrl_losses, ctrl_loss), 0)
if k % 32 == 31:
print(f"Batch {k+1}/{n_batches} done")
return torch.mean(ctrl_losses), torch.std(ctrl_losses) / np.sqrt(
effective_n_samples - 1
)
def normalization_constant(
sde, x0, ts, cfg, n_batches_normalization=512, ground_truth_control=None
):
log_weights_list = []
weights_list = []
if ground_truth_control is not None:
norm_sqd_diff_mean = 0
for k in range(n_batches_normalization):
(
states,
_,
| _, |
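The loops above accumulate log importance weights (`log_path_weight_deterministic + log_terminal_weight`) over many sampled trajectories. Since the repository's `normalization_constant` function is cut off in this excerpt, the sketch below only shows the generic, numerically stable way such log-weights are usually averaged, via a log-sum-exp; it is not the repository's implementation.

```python
import torch

def log_mean_exp(log_weights: torch.Tensor) -> torch.Tensor:
    # Stable log of the mean of exp(log_weights): logsumexp(w) - log(n).
    n = torch.tensor(float(log_weights.numel()))
    return torch.logsumexp(log_weights, dim=0) - torch.log(n)

# Toy stand-in for per-trajectory log weights gathered over several batches.
log_w = 0.1 * torch.randn(4096)
print(log_mean_exp(log_w))  # log of the estimated normalization constant
```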
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OPPO-Mente-Lab/PEA-Diffusion
# Path: utils/model_utils.py
def add_module_args(parent_args):
parser = parent_args.add_argument_group('Basic Module')
parser.add_argument('--learning_rate', default=5e-5, type=float)
parser.add_argument('--min_learning_rate', default=5e-8, type=float)
parser.add_argument('--lr_decay_steps', default=0, type=int)
parser.add_argument('--lr_decay_ratio', default=1.0, type=float)
parser.add_argument('--warmup_steps', default=0, type=int)
parser.add_argument('--warmup_ratio', default=0.1, type=float)
parser.add_argument('--weight_decay', default=1e-1, type=float)
parser.add_argument('--adam_beta1', default=0.9, type=float)
parser.add_argument('--adam_beta2', default=0.999, type=float)
parser.add_argument('--adam_epsilon', default=1e-8, type=float)
parser.add_argument('--model_path', default=None, type=str)
parser.add_argument('--la', default="zh", type=str)
parser.add_argument('--scheduler_type', default='polynomial', type=str)
return parent_args
# Path: utils/model_utils.py
def configure_optimizers(pl_model: LightningModule, model_params=None):
'''
Args:
pl_model: lightning module
model_params: Model parameters that need to be optimized
'''
# get params that optimizer need
if model_params is None:
optimizer_grouped_params = get_default_update_params(pl_model)
else:
optimizer_grouped_params = model_params
# Configure optimizer.
if isinstance(pl_model.trainer.strategy, DeepSpeedStrategy):
if 'offload_optimizer' in pl_model.trainer.strategy.config['zero_optimization']:
optimizer = DeepSpeedCPUAdam(
optimizer_grouped_params, adamw_mode=True,
lr=pl_model.hparams.learning_rate,
betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2), eps=pl_model.hparams.adam_epsilon)
else:
optimizer = FusedAdam(
optimizer_grouped_params, adam_w_mode=True,
lr=pl_model.hparams.learning_rate,
betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2), eps=pl_model.hparams.adam_epsilon)
else:
optimizer = AdamW(optimizer_grouped_params, lr=pl_model.hparams.learning_rate,
betas=(pl_model.hparams.adam_beta1, pl_model.hparams.adam_beta2),
eps=pl_model.hparams.adam_epsilon)
# Configure learning rate scheduler.
warmup_steps = pl_model.hparams.warmup_ratio * \
pl_model.total_steps if pl_model.hparams.warmup_steps == 0 else pl_model.hparams.warmup_steps
total_steps = pl_model.hparams.lr_decay_ratio * \
pl_model.total_steps if pl_model.hparams.lr_decay_steps == 0 else pl_model.hparams.lr_decay_steps
scheduler = get_scheduler(name=pl_model.hparams.scheduler_type, optimizer=optimizer,
num_warmup_steps=warmup_steps, num_training_steps=total_steps,
lr_end=pl_model.hparams.min_learning_rate)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return [optimizer], [scheduler]
# Path: utils/model_utils.py
def get_total_steps(trainer, hparams):
train_loader = trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if trainer.max_epochs > 0:
world_size = trainer.world_size
tb_size = hparams.train_batchsize * max(1, world_size)
ab_size = trainer.accumulate_grad_batches
total_steps = (len(train_loader.dataset) *
trainer.max_epochs // tb_size) // ab_size
else:
total_steps = trainer.max_steps
return total_steps
# Path: utils/universal.py
class UniversalCheckpoint(ModelCheckpoint):
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('universal checkpoint callback')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_last', action='store_true', default=False)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=None, type=float)
parser.add_argument('--save_weights_only', action='store_true', default=False)
parser.add_argument('--save_on_train_epoch_end', action='store_true', default=None)
parser.add_argument('--load_ckpt_id',default=0, type=int)
parser.add_argument('--load_ckpt_path', default='result/stablediffusion_distill_zh', type=str)
parser.add_argument('--every_n_steps', default=10, type=int)
parser.add_argument('--text_encoder', default='chinese_clip') ## ## mul_clip chinese_clip mt5 alt_clip
parser.add_argument('--text_encoder_path', default='clip_cn_vit-h-14.pt')
parser.add_argument('--hybrid_training', action='store_true', default=True)
parser.add_argument('--KD', action='store_true', default=True)
parser.add_argument('--noise_offset', default=0.5, type=float)
return parent_args
def __init__(self, args):
super().__init__(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
filename=args.filename,
save_last=args.save_last,
every_n_epochs=args.every_n_steps,
save_on_train_epoch_end=args.save_on_train_epoch_end)
# Path: utils/custom_dataset.py
class DataModuleCustom(LightningDataModule):
@ staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('Universal DataModule')
parser.add_argument('--webdataset_base_urls', type=str, nargs="+")
parser.add_argument('--num_workers', default=2, type=int)
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--shard_width', default=5, type=int)
parser.add_argument('--hr_size', default=-1, type=int)
parser.add_argument('--train_split', default=1.0, type=float)
parser.add_argument('--val_split', default=0.0, type=float)
parser.add_argument('--test_split', default=0.0, type=float)
parser.add_argument('--shuffle_train', default=False, action="store_true")
parser.add_argument('--resample_train', default=False, action="store_true")
parser.add_argument('--shuffle_num', default=None, type=int)
parser.add_argument('--test_prompts', type=str, default="./test_prompts.txt")
parser.add_argument('--test_repeat', default=1, type=int)
parser.add_argument("--resolution", type=int, default=512)
parser.add_argument("--center_crop", default=True)
return parent_args
def __init__(
self,
args,
tokenizer,
custom_collate_fn=None,
use_worker_init_fn=None,
):
super().__init__()
# self.available_shards = list(range(args.start_shard, args.end_shard + 1))
# if splits is None:
# splits = []
splits = {
'train': args.train_split,
'val': args.val_split,
'test': args.test_split,
}
self.webdataset_base_urls = args.webdataset_base_urls
self.num_workers = args.num_workers
self.batch_size = args.batch_size
self.shuffle_train = args.shuffle_train
self.resample_train = args.resample_train
self.shard_width = args.shard_width
self.hr_size = args.hr_size
self.use_worker_init_fn = use_worker_init_fn
self.shuffle_num = args.shuffle_num
self.tokenizer = tokenizer
self.collate_fn = custom_collate_fn if custom_collate_fn is not None else collate_fn
self.center_crop = args.center_crop
self.resolution = args.resolution
self.train_prop = self.val_prop = self.test_prop = 0
self.datasets = {}
self.train_prop = splits['train']
self.train_dataloader = self._train_dataloader
self.datasets['train'] = None
self.prepare_data()
self.setup()
def prepare_data(self):
assert self.train_prop + self.test_prop + self.val_prop == 1
all_urls = []
for url in self.webdataset_base_urls:
all_urls += expand_urls(url)
num_train = round(self.train_prop*len(all_urls))
num_test = round(self.test_prop*len(all_urls))
num_val = len(all_urls) - num_train - num_test
assert num_train + num_test + num_val == len(all_urls), f"{num_train} + {num_test} + {num_val} = {num_train + num_test + num_val} != {len(all_urls)}"
self.train_urls, self.test_urls, self.val_urls = random_split(all_urls, [num_train, num_test, num_val]) # , generator=torch.Generator().manual_seed(self.seed)
def setup(self, stage=None):
if 'train' in self.datasets:
self.datasets['train'] = ImageEmbeddingDataset(
self.train_urls,
self.tokenizer,
shuffle_shards=self.shuffle_train,
resample=self.resample_train,
hr_size=self.hr_size,
handler=wds.handlers.warn_and_continue,
center_crop=self.center_crop,
size=self.resolution,
)
if self.shuffle_num is not None and self.shuffle_num > 0:
self.datasets['train'].shuffle(self.shuffle_num)
def _train_dataloader(self):
# return self.create_dataloader(self.train_urls, shuffle=self.shuffle_train, resample=self.resample_train)
if self.use_worker_init_fn:
init_fn = worker_init_fn
else:
init_fn = None
return DataLoaderX(
self.datasets['train'],
num_workers=self.num_workers,
batch_size=self.batch_size,
prefetch_factor=2, # This might be good to have high so the next npy file is prefetched
pin_memory=True,
shuffle=False,
worker_init_fn=init_fn,
collate_fn=self.collate_fn,
)
# Path: train_sd_zh.py
import os
import torch
import torch.nn as nn
import inspect
import argparse
import open_clip
import cn_clip.clip as clip
from einops import rearrange
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
from utils.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
from utils.universal import UniversalCheckpoint
from utils.custom_dataset import DataModuleCustom
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, EulerDiscreteScheduler,DPMSolverMultistepScheduler
from torch.nn import functional as F
from typing import Callable, List, Optional, Union
from torchvision.utils import save_image
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from cn_clip.clip import load_from_name, available_models
if args.load_ckpt_id:
self.proj.load_state_dict(torch.load(
os.path.join(args.load_ckpt_path, f"proj_0_{args.load_ckpt_id}/pytorch_model.bin"), map_location="cpu"))
if args.KD:
self.text_encoder_1 = CLIPTextModel.from_pretrained(f"{args.model_path}/text_encoder")
self.tokenizer_1 = CLIPTokenizer.from_pretrained(f"{args.model_path}/tokenizer")
self.unet_teacher = UNet2DConditionModel.from_pretrained(args.model_path, subfolder="unet")
self.KD_teacher = {}
self.KD_student= {}
cast_hook(self.unet,self.KD_student)
cast_hook(self.unet_teacher,self.KD_teacher)
def setup(self, stage) -> None:
if stage == 'fit':
self.total_steps = 2232142
# self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
model_params = [{'params': self.proj.parameters()}]
return configure_optimizers(self, model_params=model_params)
def encode_prompt(
self,
prompt,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
device = device or self._execution_device
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# textual inversion: process multi-vector tokens if necessary
text_inputs = self.tokenizer_1(
prompt,
padding="max_length",
max_length=self.tokenizer_1.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
prompt_embeds = self.text_encoder_1(text_input_ids.to(device))
prompt_embeds = prompt_embeds[0]
# bs_embed, seq_len, _ = prompt_embeds.shape
# # duplicate text embeddings for each generation per prompt, using mps friendly method
# prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
# prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
uncond_tokens = [""]*batch_size
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer_1(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
uncond_input_ids = uncond_input.input_ids
uncond_embeddings = self.text_encoder_1(uncond_input_ids.to(device))
uncond_embeddings = uncond_embeddings[0]
return prompt_embeds, uncond_embeddings
def training_step(self, batch, batch_idx):
# self.unet.train()
with torch.no_grad():
latents = self.vae.encode(batch["pixel_values"]).latent_dist.sample()
latents = latents* self.vae.config.scaling_factor
noise = torch.randn(latents.shape).to(latents.device)
noise = noise.to(dtype=self.unet.dtype)
bsz = latents.shape[0]
timesteps = torch.randint(0, self.noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
noisy_latents = self.noise_scheduler.add_noise(latents, noise, timesteps)
noisy_latents = noisy_latents.to(dtype=self.unet.dtype)
with torch.no_grad():
if args.text_encoder=="mul_clip":
_,encoder_hidden_states = self.text_encoder.encode_text(batch["input_ids"])
_,encoder_hidden_states_uncond = self.text_encoder.encode_text(batch["input_ids_uncond"])
elif args.text_encoder=="chinese_clip":
encoder_hidden_states,_ = self.text_encoder.encode_text(batch["input_ids"])
encoder_hidden_states_uncond,_ = self.text_encoder.encode_text(batch["input_ids_uncond"])
encoder_hidden_states = self.proj(encoder_hidden_states) ## B*77*1024 --> B*77*768
encoder_hidden_states_uncond = self.proj(encoder_hidden_states_uncond)
uncond = 0.1
random = torch.rand(latents.size(0), device=latents.device)
prompt_mask = rearrange(random < uncond, "n -> n 1 1")
encoder_hidden_states = torch.where(prompt_mask, encoder_hidden_states_uncond, encoder_hidden_states)
noise_pred = self.unet(noisy_latents, timesteps, encoder_hidden_states, return_dict=False)[0]
lr = self.trainer.optimizers[0].param_groups[0]["lr"]
loss = F.mse_loss(noise_pred, noise, reduction="none")
if args.KD and args.hybrid_training:
## Chinese or English tags in batch
zh_or_not = batch["zh_or_not"].unsqueeze(1).unsqueeze(1).unsqueeze(1)
loss = loss*zh_or_not
loss = loss.mean([1, 2, 3]).mean()
self.log("lr", lr, on_epoch=False, prog_bar=True, logger=True)
| self.log("train_loss", loss.item(), on_epoch=False, prog_bar=True, logger=True) |
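The `training_step` above drops the text condition for roughly 10% of the batch by building a boolean mask and swapping in the unconditional embeddings with `torch.where`; the mask is reshaped to `(n, 1, 1)` so it broadcasts over the token and feature dimensions. A tiny sketch of just that masking step on toy tensors (shapes are assumptions for illustration):

```python
import torch
from einops import rearrange

cond = torch.ones(4, 77, 8)      # stand-in conditional text embeddings (B, T, D)
uncond = torch.zeros(4, 77, 8)   # stand-in unconditional embeddings
drop_prob = 0.1

random = torch.rand(cond.size(0))
prompt_mask = rearrange(random < drop_prob, "n -> n 1 1")  # broadcast over T and D
mixed = torch.where(prompt_mask, uncond, cond)

# Rows whose mask entry is True were replaced by the unconditional embedding.
print(prompt_mask.squeeze(), mixed.sum(dim=(1, 2)))
```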
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TheMind-AI/fluid-db
# Path: fluiddb/functions/update_memory_function.py
class UpdateMemoryFunction(FunctionBase):
name: str = "update-memory"
description: str = "Tool to update or write to the structured memory of the user."
args_schema: Type[BaseModel] = UpdateMemoryFunctionArguments
def __init__(self):
super().__init__()
self.llm = OpenAILLM()
def run(self, uid: str, user_message: str):
print("RUN, user_message:", user_message)
memory = StructuredJsonMemory()
schema = memory.schema(uid)
schema_descriptions = memory.get_descriptions(uid)
print("SCHEMA:")
print(json.dumps(schema, indent=2))
print(json.dumps(schema_descriptions, indent=2))
# First fetch data
fetch_result = self.maybe_fetch_data(user_message, schema, schema_descriptions)
print("==FETCH==")
print(" REASONING:", fetch_result.reasoning)
print(" JSON PATH LIST:", fetch_result.json_path_list)
fetched_data = {json_path: memory.query(uid, json_path) for json_path in fetch_result.json_path_list}
llm_result = self.maybe_update_memory(user_message, schema, fetched_data, schema_descriptions)
print("==UPDATE==")
print(" REASONING:", llm_result.reasoning)
print(" JSON PATH:", llm_result.json_path)
print(" DATA:", llm_result.data)
print(" DESCRIPTION:", llm_result.description)
data = json.loads(llm_result.data)
memory.update(uid, llm_result.json_path, data, llm_result.description)
print("Done")
# REMINDER: we'll need to deal with timezones here
def maybe_update_memory(self, user_message: str, memory_schema: str, fetched_data=None, schema_descriptions: str = "") -> UpdateMemoryModel:
prompt = self._update_memory_prompt(user_message, memory_schema, fetched_data, schema_descriptions)
model = self.llm.instruction_instructor(prompt, UpdateMemoryModel, max_retries=3)
assert isinstance(model, UpdateMemoryModel)
return model
def maybe_fetch_data(self, user_message: str, memory_schema: str, schema_descriptions: str = "") -> FetchMemoryModel:
prompt = self._retrieve_memory_prompt(user_message, memory_schema, schema_descriptions)
model = self.llm.instruction_instructor(prompt, FetchMemoryModel, max_retries=3)
assert isinstance(model, FetchMemoryModel)
return model
@staticmethod
def _retrieve_memory_prompt(user_message: str, memory_schema: str, schema_descriptions: str = ""):
return f"""
You are a query builder, AI that generates JsonPath query from natural language. You're using jsonpath-ng to query the structured memory.
Current datetime is {datetime.now().strftime("%Y-%m-%d %H:%M")}
The jsonpath-ng uses the following language:
- Nested object: $.objects.nested_object
- Array: $.objects.some_array[*]
- Sorted: $.objects[\\some_field], $.objects[\\some_field,/other_field]
- Filter: $.objects[?some_field =~ "foobar"], $.objects[?(@some_field > 5)] (make sure to put strings and regex into quotes)
You receive a json model schema and a description of the data you need to fetch and you return jsonpath queries for relevant data.
Because you don't know what's in the data, write multiple queries to get as much relevant info as possible, trying to filter based on different strings etc.
Make sure to put strings and regex in quotes when filtering. The regex needs to be string in quotes. It will be evaluated in python re.search function.
You can use regex match (=~) to maximize chances of finding the data.
ALWAYS write queries that support the JSON schema. NEVER query key/values which are not present in the provided json schema.
ALWAYS fetch the whole object from an array, not just a single value. For example, if you're asked for the name of the user, don't return only the name, return the whole object.
If the data you're asked for are not in the schema, return an empty array []
Always expect date in this format: YYYY-MM-DD
Always expect time in this format: HH:MM
Always run an internal dialogue before returning the query.
---
Examples:
SCHEMA:
{{
"phones": [
{{
"name": "string",
"number": "string"
}}
],
"user": {{
"name": "string"
}},
"events": [
{{
"name": "string",
"date": "string",
"price": "number"
}}
]
}}
REQUEST:
What's Adam's phone number?
QUERIES:
$.phones[?name = "adam"]
$.phones[?name = "Adam"]
Notes: fetching whole objects, not just phone number, trying different names to get most data.
REQUEST:
What events are happening tomorrow?
QUERY:
$.events[?(@.date = "2023-12-08")]
Notes: fetching whole objects
---
SCHEMA:
{memory_schema}
{schema_descriptions if schema_descriptions else ""}
REQUEST: {user_message}
"""
@staticmethod
def _update_memory_prompt(user_message: str, memory_schema: str, fetched_data=None, schema_descriptions: str = ""):
return f"""
You are a senior database architect, that creates queries and new data structures from natural language.
Current datetime is {datetime.now().strftime("%Y-%m-%d %H:%M")}
Take the message you received from the user and create a query and data to store in the structured memory.
Try to append data to the existing schema where possible.
Only edit data when you're sure that the data are there. Otherwise append whole new objects.
When it's not possible to fit the data to the current schema make sure to include the description of the new field you create.
Always think about using the memory in the future. You should create lists when we might append more objects of similar type in the future. To create a list the data should be a list: [new data]
Don't put the jsonpath in the data, the object will be automatically created on the path you specify.
Always use strings in lowercase when querying and filtering based on values. If you're comparing strings, use regex match: =~ to maximize chances of finding the data.
If there are similar data in the schema but the data don't fit the current schema, create a new schema and make it inconsistent. Make sure to write a description of the new object schema.
Always store date in this format: YYYY-MM-DD
Always store time in this format: HH:MM
Always run an internal dialogue before returning the query and data.
---
Examples:
SCHEMA:
{{
"phones": [
{{
"name": "string",
"number": "string"
}}
],
"user": {{
"name": "string"
}},
"events": [
{{
"name": "string",
"date": "string",
"price": "number"
}}
]
}}
REQUEST: Adam's phone number is 722263238.
QUERY: $.phones
DATA: {{"name": "adam", "number": "722264238"}}
Notes: appending whole object as I don't know if object with name adam is in the list
REQUEST: My last name is Zvada.
QUERY: $.user.last_name
DATA: Zvada
Notes: I can straight edit here as I know the whole user object
REQUEST: Adam's phone number has +420 prefix.
QUERY: $.phones[?name = "adam"].prefix
DATA: +420
Notes: I can do this ONLY if I'm sure object {{"name": "adam", .. other fields}} exists.
REQUEST: I'm going to a Christmas party tomorrow which costs 20 usd to entry.
QUERY: $.events
DATA: {{"name": "Christmas party", "date": "{(datetime.now() + timedelta(days=1)).strftime('%Y-%m-%d')}", "price": {{"currency": "USD", "value": 20}}}}
DESCRIPTIONS: Events the user is attending
Notes: The price data doesn't fit the model, I will update the model a bit and push it there
REQUEST: What is my brother's name?
QUERY: NA
DATA: {{}}
Notes: This is not in the schema.
---
These are some relevant data from the memory:
{fetched_data}
SCHEMA:
{memory_schema}
{schema_descriptions if schema_descriptions else ""}
REQUEST: {user_message}
"""
# Path: fluiddb/functions/update_sql_memory_function.py
class UpdateSQLMemoryFunction(FunctionBase):
name: str = "update-memory"
description: str = "Tool to update or write to the structured memory of the user."
args_schema: Type[BaseModel] = UpdateSQLMemoryFunctionArguments
def __init__(self):
super().__init__()
self.llm = OpenAILLM()
def run(self, uid: str, user_message: str, prev_requests: str = None):
print("RUN, user_message:", user_message)
memory = StructuredSQLMemory()
schema = memory.schema(uid)
print("SCHEMA:")
print(schema)
# First fetch data
if schema:
fetch_result = self.maybe_fetch_data(user_message, schema, prev_requests=prev_requests)
print("==FETCH==")
print(" REASONING:", fetch_result.reasoning)
print(" QUERIES:", fetch_result.sql_queries)
prev_reasoning = fetch_result.reasoning
fetched_data = [memory.query(uid, q) for q in fetch_result.sql_queries]
else:
prev_reasoning = ""
fetched_data = ""
schema = memory.schema(uid)
llm_result = self.maybe_update_memory(user_message, schema, fetched_data, prev_reasoning=prev_reasoning, prev_requests=prev_requests)
print("==UPDATE==")
print(" REASONING:", llm_result.reasoning)
print(" QUERIES:", llm_result.sql_queries)
update = [memory.query(uid, q) for q in llm_result.sql_queries]
print(update)
print("Done")
# REMINDER: we'll need to deal with timezones here
def maybe_update_memory(self, user_message: str, memory_schema: str, fetched_data=None, prev_reasoning: str = None, prev_requests: str = None) -> SQLQueryModel:
prompt = self._update_memory_prompt(user_message, memory_schema, fetched_data, prev_reasoning, prev_requests)
model = self.llm.instruction_instructor(prompt, SQLQueryModel, max_retries=3)
assert isinstance(model, SQLQueryModel)
return model
def maybe_fetch_data(self, user_message: str, memory_schema: str, prev_requests: str = None) -> SQLQueryModel:
prompt = self._retrieve_memory_prompt(user_message, memory_schema, prev_requests)
model = self.llm.instruction_instructor(prompt, SQLQueryModel, max_retries=3)
assert isinstance(model, SQLQueryModel)
return model
@staticmethod
def _retrieve_memory_prompt(user_message: str, memory_schema: str = "", prev_requests: str = None):
return f"""
You are a senior SQL master, AI that generates SQL Queries from natural language. You're using SQL for sqlite3 to query the database.
Current datetime is {datetime.now().strftime("%Y-%m-%d %H:%M")}
For the given request, return the list of SQL SELECT queries that retrieve the most relevant information from the sqlite database.
You don't know what's in the data, write multiple queries to get as much relevant info as possible.
ALWAYS write SELECT queries that support the SQL TABLES SCHEMA, never make educated guesses.
NEVER SELECT columns that do not exist in SQL TABLES SCHEMA, such query would kill innocent people.
ALWAYS fetch the whole row (*) with the SELECT statement, not just a single column.
When filtering using strings, use LIKE to maximize chances of finding the data. More data is always better.
If the data you're asked for are clearly not in the schema, return an empty string.
Always run an internal dialogue before returning the query.
---
PREVIOUS USER REQUESTS:
{prev_requests if prev_requests else "None"}
SQL TABLES SCHEMA:
{memory_schema if memory_schema else "There are no tables in the DB."}
USER REQUEST: {user_message}
"""
@staticmethod
def _update_memory_prompt(user_message: str, memory_schema: str, fetched_data=None, prev_reasoning: str = None, prev_requests: str = None):
return f"""
You are a senior SQL database architect, AI that creates the best schema for data provided using SQL for sqlite3.
Current datetime is {datetime.now().strftime("%Y-%m-%d %H:%M")}
For the given user request you will return the list of SQL queries that store the information to sqlite database.
ALWAYS think step by step. Run an internal dialogue before returning the queries.
You receive the user request. First, think about how to store the data based on the database schema.
If the data conform to the schema simply insert the new data using INSERT INTO statement.
If you need new columns make sure to create them first using ALTER TABLE ADD COLUMN. Then make sure to INSERT the data in the next query. The order of queries matters!
NEVER make educated guesses, ONLY INSERT data if the columns exist in the SQL TABLES SCHEMA, otherwise create them first.
You can't INSERT or UPDATE a column that does not exist yet. First you must ALTER TABLE ADD COLUMN. Otherwise an error will occur and innocent people will die.
If the data needs a new table make sure to create the new table first using CREATE TABLE. Then make sure to INSERT the data in the next query. The order of queries matters!
Make sure to keep the relationships between the tables using the correct ids.
If you don't get relevant data in the prompt assume there are none and INSERT all data as they're new. If you have relevant data you can update the existing data using UPDATE queries.
ALWAYS remember to insert the data if you created new table or added columns. If you don't store the data in one of the queries the data will be lost forever!
---
PREVIOUS USER REQUESTS:
{prev_requests if prev_requests else "None"}
SQL TABLES SCHEMA:
{memory_schema if memory_schema else "No tables yet in the DB."}
{f"Initial thoughts: {prev_reasoning}" if prev_reasoning else ""}
{f"RELEVANT DATA FROM sqlite DB: {fetched_data}" if fetched_data else ""}
USER REQUEST: {user_message}
"""
# Path: eval/update_memory_eval.py
import json
from datetime import datetime
from fluiddb.functions.update_memory_function import UpdateMemoryFunction
from fluiddb.functions.update_sql_memory_function import UpdateSQLMemoryFunction
from fluiddb.database.json.json_engine import StructuredJsonMemory
from fluiddb.database.sql.structured_sql_memory import StructuredSQLMemory
class UpdateMemoryEval:
def __init__(self):
self.uid = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
# self.uid = "2023-12-14-14-58-36"
def run(self):
# func = UpdateMemoryFunction()
func = UpdateSQLMemoryFunction()
func.run(self.uid, "Adams phone number is 722238738")
func.run(self.uid, "David Mokos phone is 733544390")
func.run(self.uid, "David Mokos phone is 733544390. David's phone is also 6286884994, it's a US phone")
func.run(self.uid, "Tomorrow I have a history test I need to learn for.")
func.run(self.uid, "Davids birthday is September 2")
func.run(self.uid, "Adam likes riding big black horses")
func.run(self.uid, "Adams last name is Zvada")
| func.run(self.uid, "Adam Zvada, the only Adam I know, lives in Prague and SF, Cali") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yiwenlu66/learning-qp
# Path: src/envs/env_creators.py
def tank_initial_generator(size, device, rng):
def tank_ref_generator(size, device, rng):
def tank_randomizer(size, device, rng):
B = torch.tensor(sys_param["tank"]["B"], device=device, dtype=torch.float).unsqueeze(0)
# Path: src/utils/osqp_utils.py
def osqp_oracle(q, b, P, H, return_iter_count=False, max_iter=1000):
sol, iter_count = osqp_solve_qp_guarantee_return(
P=P, q=q, G=-H, h=b,
A=None, b=None, lb=None, ub=None,
max_iter=max_iter, eps_abs=1e-10, eps_rel=1e-10,eps_prim_inf=1e-10, eps_dual_inf=1e-10, verbose=False,
)
if not return_iter_count:
return sol
else:
return sol, iter_count
# Path: src/modules/qp_unrolled_network.py
class QPUnrolledNetwork(nn.Module):
"""
Learn a QP problem from the input using an MLP, then solve the QP using a fixed number of unrolled PDHG iterations.
Form of QP:
minimize (1/2)x'Px + q'x
subject to Hx + b >= 0,
where x in R^n, b in R^m.
"""
def __init__(
self, device, input_size, n_qp, m_qp, qp_iter, mlp_builder,
shared_PH=False,
affine_qb=False,
strict_affine_layer=False,
obs_has_half_ref=False,
symmetric=False,
no_b=False,
use_warm_starter=False,
train_warm_starter=False,
ws_loss_coef=1.,
ws_update_rate=0.01,
ws_loss_shaper=lambda x: x ** (1 / 2),
mpc_baseline=None,
use_osqp_for_mpc=False,
imitate_mpc=False,
use_residual_loss=False,
force_feasible=False,
feasible_lambda=10,
is_test=False,
):
"""mlp_builder is a function mapping (input_size, output_size) to a nn.Sequential object.
If shared_PH == True, P and H are parameters independent of input, and q and b are functions of input;
Otherwise, (P, H, q, b) are all functions of input.
If affine_qb == True, then q and b are restricted to be affine functions of input.
If strict_affine_layer == True (only effective when affine_qb=True), then:
1. q is linear w.r.t. (x0, xref) (no bias)
2. b is affine w.r.t. x0 (no dependence on xref)
If obs_has_half_ref == True, the policy knows that the observation is in the form (x0, xref), with each taking up half of the dimension of the observation.
If symmetric == True (only effective when affine_qb=True), then:
1. The bias terms are disabled in the modeling of q and b, i.e., q = Wq * x, b = Wb * x.
2. The constraint is assumed to be -1 <= Hx + b <= 1, instead of Hx + b >= 0.
If no_b == True in addition to symmetric == True, then b is skipped altogether, i.e., the constraint is assumed to be -1 <= Hx <= 1.
If mpc_baseline != None and imitate_mpc == False, then the forward function directly returns the solution of the MPC problem, instead of solving the learned QP problem. Can be used for benchmarking MPC.
If mpc_baseline != None and imitate_mpc == True, then the forward function returns the solution of the learned QP problem, but a loss term is computed using the MPC problem. Can be used for supervised imitation learning.
If force_feasible == True, solve the following problem instead of the original QP problem:
minimize_{x,y} (1/2)x'Px + q'x + lambda * y^2
s.t. Hx + b + y * 1 >= 0, y >= 0,
where x in R^n, y in R.
In this case, the solution returned will be of dimension (n + 1).
"""
super().__init__()
self.shared_PH = shared_PH
self.affine_qb = affine_qb
self.strict_affine_layer = strict_affine_layer
self.obs_has_half_ref = obs_has_half_ref
self.device = device
self.input_size = input_size
# QP dimensions: these are the numbers of variables and constraints WITHOUT considering the slack variable
self.n_qp = n_qp
self.m_qp = m_qp
self.qp_iter = qp_iter
self.symmetric = symmetric
self.no_b = no_b
self.n_P_param = n_qp * (n_qp + 1) // 2
self.n_q_param = n_qp
self.n_H_param = m_qp * n_qp
self.n_b_param = m_qp if not self.no_b else 0
self.n_mlp_output = 0
if not self.shared_PH:
self.n_mlp_output += (self.n_P_param + self.n_H_param)
self.P_params = None
self.H_params = None
else:
self.P_params = nn.Parameter(torch.randn((self.n_P_param,), device=device))
self.H_params = nn.Parameter(torch.randn((self.n_H_param,), device=device))
if not self.affine_qb:
self.n_mlp_output += (self.n_q_param + self.n_b_param)
self.qb_affine_layer = None
else:
if not self.strict_affine_layer:
self.qb_affine_layer = nn.Linear(input_size, self.n_q_param + self.n_b_param, bias=not self.symmetric)
else:
self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref)
if self.n_mlp_output > 0:
self.mlp = mlp_builder(input_size, self.n_mlp_output)
else:
self.mlp = None
# TODO: add preconditioner
self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None
self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None
self.train_warm_starter = train_warm_starter
self.ws_loss_coef = ws_loss_coef
self.ws_update_rate = ws_update_rate
self.ws_loss_shaper = ws_loss_shaper
# P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True)
self.fixed_PH = is_test and shared_PH
# Includes losses generated by the model itself (independent of interaction with env), e.g., warm starting & preconditioning
self.autonomous_losses = {}
self.mpc_baseline = mpc_baseline
self.use_osqp_for_mpc = use_osqp_for_mpc
self.imitate_mpc = imitate_mpc
# Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem
self.use_residual_loss = use_residual_loss
# Whether to force the problem to be feasible
self.force_feasible = force_feasible
self.feasible_lambda = feasible_lambda
self.solver = None
self.info = {}
# Reserved for storing the controllers for each simulation instance when robust MPC is enabled
self.robust_controllers = []
# Store info returned by env
self.env_info = {}
# When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization
self.is_active = None
def initialize_solver(self):
# If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable)
n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp
m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp
# is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver
# When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead
if not self.fixed_PH:
self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible)
else:
# Should be called after loading state dict
Pinv, H = self.get_PH()
self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible)
def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs):
qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H])
X0 = self.warm_starter(qd, bd, Pinvd, Hd)
gt = solver_Xs[:, -1, :].detach()
return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean())
def parallel_controller_creation(self, controller_creator, xref_np, bs):
"""
Create robust MPC controllers in parallel
"""
# Helper function for parallel execution
def task_creator(index):
return controller_creator(self.mpc_baseline, xref_np[index, :])
with ThreadPoolExecutor() as executor:
# Executing the tasks in parallel
results = executor.map(task_creator, range(bs))
# Collecting the results
self.robust_controllers.extend(results)
def run_mpc_baseline(self, x, use_osqp_oracle=False):
robust_method = self.mpc_baseline.get("robust_method", None)
x0, xref = self.mpc_baseline["obs_to_state_and_ref"](x)
bs = x.shape[0]
# Conversions between torch and np
t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float)
f = lambda t: t.detach().cpu().numpy()
f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy())
if robust_method is None:
# Run vanilla MPC without robustness
eps = 1e-3
n, m, P, q, H, b = mpc2qp(
self.mpc_baseline["n_mpc"],
self.mpc_baseline["m_mpc"],
self.mpc_baseline["N"],
t(self.mpc_baseline["A"]),
t(self.mpc_baseline["B"]),
t(self.mpc_baseline["Q"]),
t(self.mpc_baseline["R"]),
self.mpc_baseline["x_min"] + eps,
self.mpc_baseline["x_max"] - eps,
self.mpc_baseline["u_min"],
self.mpc_baseline["u_max"],
x0,
xref,
normalize=self.mpc_baseline.get("normalize", False),
Qf=self.mpc_baseline.get("terminal_coef", 0.) * t(np.eye(self.mpc_baseline["n_mpc"])) if self.mpc_baseline.get("Qf", None) is None else t(self.mpc_baseline["Qf"]),
)
if not use_osqp_oracle:
solver = QPSolver(x.device, n, m, P=P, H=H)
Xs, primal_sols = solver(q, b, iters=100)
sol = primal_sols[:, -1, :]
else:
osqp_oracle_with_iter_count = functools.partial(osqp_oracle, return_iter_count=True)
if q.shape[0] > 1:
sol_np, iter_counts = np_batch_op(osqp_oracle_with_iter_count, f(q), f(b), f_sparse(P), f_sparse(H))
sol = t(sol_np)
else:
sol_np, iter_count = osqp_oracle_with_iter_count(f(q[0, :]), f(b[0, :]), f_sparse(P), f_sparse(H))
sol = t(sol_np).unsqueeze(0)
iter_counts = np.array([iter_count])
# Save OSQP iteration counts into the info dict
if "osqp_iter_counts" not in self.info:
self.info["osqp_iter_counts"] = iter_counts
else:
self.info["osqp_iter_counts"] = np.concatenate([self.info["osqp_iter_counts"], iter_counts])
return sol, (P.unsqueeze(0), q, H.unsqueeze(0), b)
elif robust_method in ["scenario", "tube"]:
# Set up scenario or tube MPC
if not self.robust_controllers:
# Create a controller for each simulation instance, according to the current reference (note: this assumes that the mapping from instance index to reference is constant)
controller_creator = {
"scenario": scenario_robust_mpc,
"tube": tube_robust_mpc,
}[robust_method]
xref_np = f(xref)
self.parallel_controller_creation(controller_creator, xref_np, bs)
self.is_active = np.ones((bs,), dtype=bool)
# Get solutions according to current state
x0_np = f(x0)
already_on_stats = f(self.env_info.get("already_on_stats", torch.zeros((bs,), dtype=bool))).astype(bool)
self.is_active = np.logical_not(already_on_stats) & self.is_active # Skip computation for instances already done
get_solution = lambda i: self.robust_controllers[i](x0_np[i, :], is_active=self.is_active[i])
sol_np, running_time = np_batch_op(get_solution, np.arange(bs))
sol = t(sol_np)
# Save running time to info dict
non_zero_mask = running_time != 0. # Filter out instances that are already done
running_time_eff = running_time[non_zero_mask]
if "running_time" not in self.info:
self.info["running_time"] = running_time_eff
else:
self.info["running_time"] = np.concatenate([self.info["running_time"], running_time_eff])
return sol, None
def get_PH(self, mlp_out=None):
"""
Compute P, H matrices from the parameters.
Notice: returns (Pinv, H) instead of (P, H)
"""
# Decode MLP output
end = 0
if not self.shared_PH:
start = end
end = start + self.n_P_param
P_params = mlp_out[:, start:end]
start = end
end = start + self.n_H_param
H_params = mlp_out[:, start:end]
else:
P_params = self.P_params.unsqueeze(0)
H_params = self.H_params.unsqueeze(0)
# Reshape P, H vectors into matrices
Pinv = make_psd(P_params, min_eig=1e-2)
H = H_params.view(-1, self.m_qp, self.n_qp)
# If the problem is forced to be feasible, compute the parameters (\tilde{P}, \tilde{H}) of the augmented problem
# \tilde{P} = [P, 0; 0, lambda]
if self.force_feasible:
zeros_n = torch.zeros((1, self.n_qp, 1), device=self.device)
I = torch.eye(1, device=self.device).unsqueeze(0)
tilde_P_inv = torch.cat([
torch.cat([Pinv, zeros_n], dim=2),
torch.cat([zeros_n.transpose(1, 2), 1 / self.feasible_lambda * I], dim=2)
], dim=1)
# \tilde{H} = [H, I; 0, I]
ones_m = torch.ones((1, self.m_qp, 1), device=self.device)
tilde_H = torch.cat([
torch.cat([H, ones_m], dim=2),
torch.cat([zeros_n.transpose(1, 2), I], dim=2)
], dim=1)
Pinv, H = tilde_P_inv, tilde_H
return Pinv, H
def get_qb(self, x, mlp_out=None):
"""
Compute q, b vectors from the parameters.
"""
bs = x.shape[0]
end = self.n_P_param + self.n_H_param if not self.shared_PH else 0
if not self.affine_qb:
start = end
end = start + self.n_q_param
q = mlp_out[:, start:end]
start = end
end = start + self.n_b_param
b = mlp_out[:, start:end]
else:
qb = self.qb_affine_layer(x)
q = qb[:, :self.n_q_param]
b = qb[:, self.n_q_param:]
if self.no_b:
b = torch.zeros((bs, self.m_qp), device=self.device)
# If the problem is forced to be feasible, compute the parameters (\tilde{q}, \tilde{b}) of the augmented problem
if self.force_feasible:
zeros_1 = torch.zeros((bs, 1), device=self.device)
# \tilde{q} = [q; 0]
tilde_q = torch.cat([q, zeros_1], dim=1)
# \tilde{b} = [b; 0]
tilde_b = torch.cat([b, zeros_1], dim=1)
q, b = tilde_q, tilde_b
return q, b
def forward(self, x, return_problem_params=False, info=None):
if info is not None:
self.env_info = info
if self.mpc_baseline is not None:
mpc_sol, mpc_problem_params = self.run_mpc_baseline(x, use_osqp_oracle=self.use_osqp_for_mpc)
if (self.mpc_baseline is not None) and (not self.imitate_mpc):
# MPC solution is directly used as the final solution
sol, problem_params = mpc_sol, mpc_problem_params
else:
# Check whether solver has been initialized
if self.solver is None:
self.initialize_solver()
bs = x.shape[0]
# Run MLP forward pass, if necessary
if self.mlp is not None:
mlp_out = self.mlp(x)
else:
mlp_out = None
# Compute P, H, if they are not fixed
if not self.fixed_PH:
Pinv, H = self.get_PH(mlp_out)
else:
Pinv, H = None, None
# Compute q, b
q, b = self.get_qb(x, mlp_out)
# Update parameters of warm starter with a delay to stabilize training
if self.train_warm_starter:
self.warm_starter_delayed.load_state_dict(interpolate_state_dicts(self.warm_starter_delayed.state_dict(), self.warm_starter.state_dict(), self.ws_update_rate))
# Run solver forward
if self.use_residual_loss:
Xs, primal_sols, residuals = self.solver(q, b, Pinv=Pinv, H=H, iters=self.qp_iter, return_residuals=True)
primal_residual, dual_residual = residuals
residual_loss = ((primal_residual ** 2).sum(dim=-1) + (dual_residual ** 2).sum(dim=-1)).mean()
self.autonomous_losses["residual"] = 1e-3 * residual_loss
else:
Xs, primal_sols = self.solver(q, b, Pinv=Pinv, H=H, iters=self.qp_iter)
sol = primal_sols[:, -1, :]
# Compute warm starter loss
if self.train_warm_starter:
self.autonomous_losses["warm_starter"] = self.compute_warm_starter_loss(q, b, Pinv, H, Xs)
# Compute imitation loss
if self.imitate_mpc:
# Use min(n of learned qp, n of mpc) as the common dimension of solution
sol_dim = min(self.n_qp, mpc_sol.shape[-1])
self.autonomous_losses["imitation_only"] = ((sol[:, :sol_dim] - mpc_sol[:, :sol_dim]) ** 2).sum(dim=-1).mean()
if return_problem_params:
problem_params = (torch.linalg.inv(Pinv), q, H, b)
if not return_problem_params:
# Only return the solution
return sol
else:
# Return the solution as well as (P, q, H, b)
return sol, problem_params
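# Illustrative aside, not part of the repository above: a small numpy check of the
# `force_feasible` construction described in the docstring of QPUnrolledNetwork. For any x,
# choosing the slack y = max(0, max_i(-(Hx + b)_i)) satisfies the augmented constraints
# Hx + b + y*1 >= 0 and y >= 0, so the augmented QP always has a feasible point even when
# the learned (H, b) define an empty set. H and b below are random placeholders.
import numpy as np

rng = np.random.default_rng(0)
n, m = 4, 8
H = rng.standard_normal((m, n))
b = rng.standard_normal(m) - 5.0           # deliberately hard to satisfy
x = np.zeros(n)                            # any candidate point
y = max(0.0, float(np.max(-(H @ x + b))))  # smallest slack that works
assert np.all(H @ x + b + y >= -1e-9) and y >= 0.0
print("feasible with slack y =", y)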
# Path: experiments/tank/test_skip_steady.py
import sys
import os
import numpy as np
import torch
from src.envs.env_creators import sys_param, env_creators
from src.utils.osqp_utils import osqp_oracle
from icecream import ic
from src.modules.qp_unrolled_network import QPUnrolledNetwork
# %% Problem setup
file_path = os.path.dirname(__file__)
sys.path.append(os.path.join(file_path, "../.."))
x_ref = np.array([19., 19., 2., 2.])
A = sys_param["tank"]["A"]
B = sys_param["tank"]["B"]
Q = sys_param["tank"]["Q"]
R = sys_param["tank"]["R"]
x_min = sys_param["tank"]["x_min"] * np.ones(4)
x_max = sys_param["tank"]["x_max"] * np.ones(4)
u_min = sys_param["tank"]["u_min"] * np.ones(2)
u_max = 1.0 * np.ones(2)
# %% Oracle
# min (x - x_ref)' * Q * (x - x_ref) + u' * R * u, s.t., x = (I - A)^{-1} * B * u, x_min <= x <= x_max, u_min <= u <= u_max; cast into min 0.5 * u' * P * u + q' * u, s.t., H * u + b >= 0
inv_I_minus_A = np.linalg.inv(np.eye(A.shape[0]) - A)
P = 2 * (B.T @ inv_I_minus_A.T @ Q @ inv_I_minus_A @ B + R)
# Calculate q
q = -2 * inv_I_minus_A.T @ Q.T @ x_ref @ B
# Calculate c
c = x_ref.T @ Q @ x_ref
# Calculate H and b
H = np.vstack([
inv_I_minus_A @ B,
-inv_I_minus_A @ B,
np.eye(u_min.shape[0]),
-np.eye(u_max.shape[0])
])
b = np.hstack([
-x_min,
x_max,
-u_min,
u_max
])
u_opt = osqp_oracle(q, b, P, H)
x_opt = inv_I_minus_A @ B @ u_opt
# %% Evaluation
eval_value = lambda u: 0.5 * u.T @ P @ u + q.T @ u + c
opt_val = eval_value(u_opt)
ic(opt_val)
# %% Evaluate the learned controller
def get_state_dict(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
model = checkpoint["model"]
prefix = "a2c_network.policy_net."
policy_net_state_dict = {k.lstrip(prefix): v for (k, v) in model.items() if k.startswith(prefix)}
if "running_mean_std.running_mean" in model:
running_mean = model["running_mean_std.running_mean"].to(dtype=torch.float)
running_std = model["running_mean_std.running_var"].sqrt().to(dtype=torch.float)
else:
running_mean = torch.tensor([0.])
running_std = torch.tensor([1.])
return policy_net_state_dict, running_mean, running_std
| def make_obs(x, x_ref, running_mean, running_std, normalize): |
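# Illustrative aside, not part of the example above: a quick numerical check of the
# steady-state QP derivation used in test_skip_steady.py. With M = (I - A)^{-1} and
# x = M B u, the tracking cost (x - x_ref)' Q (x - x_ref) + u' R u equals
# 0.5 u' P u + q' u + c for P = 2 (B'M'QMB + R), q = -2 B'M'Q x_ref, c = x_ref' Q x_ref.
# The matrices below are random stand-ins (Q, R symmetric), not the actual tank model.
import numpy as np

rng = np.random.default_rng(0)
nx, nu = 4, 2
A = 0.1 * rng.standard_normal((nx, nx))
B = rng.standard_normal((nx, nu))
Q = np.diag(rng.random(nx) + 0.1)
R = np.diag(rng.random(nu) + 0.1)
x_ref = rng.standard_normal(nx)

M = np.linalg.inv(np.eye(nx) - A)
P = 2 * (B.T @ M.T @ Q @ M @ B + R)
q = -2 * B.T @ M.T @ Q @ x_ref
c = x_ref @ Q @ x_ref

u = rng.standard_normal(nu)
x = M @ B @ u
direct = (x - x_ref) @ Q @ (x - x_ref) + u @ R @ u
via_qp = 0.5 * u @ P @ u + q @ u + c
assert np.isclose(direct, via_qp)
print(direct, via_qp)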
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zhiheLu/Ensemble_VLM
# Path: trainers/cocoop.py
class CoCoOp(TrainerX):
def check_cfg(self, cfg):
assert cfg.TRAINER.COCOOP.PREC in ["fp16", "fp32", "amp"]
def build_model(self):
cfg = self.cfg
classnames = self.dm.dataset.classnames
print(f"Loading CLIP (backbone: {cfg.MODEL.BACKBONE.NAME})")
clip_model = load_clip_to_cpu(cfg)
if cfg.TRAINER.COCOOP.PREC == "fp32" or cfg.TRAINER.COCOOP.PREC == "amp":
# CLIP's default precision is fp16
clip_model.float()
print("Building custom CLIP")
self.model = CustomCLIP(cfg, classnames, clip_model)
print("Turning off gradients in both the image and the text encoder")
name_to_update = "prompt_learner"
for name, param in self.model.named_parameters():
if name_to_update not in name:
param.requires_grad_(False)
# Double check
enabled = set()
for name, param in self.model.named_parameters():
if param.requires_grad:
enabled.add(name)
print(f"Parameters to be updated: {enabled}")
if cfg.MODEL.INIT_WEIGHTS:
load_pretrained_weights(self.model.prompt_learner, cfg.MODEL.INIT_WEIGHTS)
self.model.to(self.device)
# NOTE: only give prompt_learner to the optimizer
self.optim = build_optimizer(self.model.prompt_learner, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
self.register_model("prompt_learner", self.model.prompt_learner, self.optim, self.sched)
self.scaler = GradScaler() if cfg.TRAINER.COCOOP.PREC == "amp" else None
# Note that multi-gpu training could be slow because CLIP's size is
# big, which slows down the copy operation in DataParallel
device_count = torch.cuda.device_count()
if device_count > 1:
print(f"Multiple GPUs detected (n_gpus={device_count}), use all of them!")
self.model = nn.DataParallel(self.model)
def forward_backward(self, batch):
image, label = self.parse_batch_train(batch)
model = self.model
optim = self.optim
scaler = self.scaler
prec = self.cfg.TRAINER.COCOOP.PREC
if prec == "amp":
with autocast():
loss = model(image, label)
optim.zero_grad()
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
else:
loss = model(image, label)
optim.zero_grad()
loss.backward()
optim.step()
loss_summary = {"loss": loss.item()}
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
return loss_summary
def model_inference(self, input, feature=False):
return self.model(input, feature=feature)
def parse_batch_train(self, batch):
input = batch["img"]
label = batch["label"]
input = input.to(self.device)
label = label.to(self.device)
return input, label
def load_model(self, directory, epoch=None):
if not directory:
print("Note that load_model() is skipped as no pretrained model is given")
return
names = self.get_model_names()
# By default, the best model is loaded
model_file = "model-best.pth.tar"
if epoch is not None:
model_file = "model.pth.tar-" + str(epoch)
for name in names:
model_path = osp.join(directory, name, model_file)
if not osp.exists(model_path):
raise FileNotFoundError('Model not found at "{}"'.format(model_path))
checkpoint = load_checkpoint(model_path)
state_dict = checkpoint["state_dict"]
epoch = checkpoint["epoch"]
# Ignore fixed token vectors
if "token_prefix" in state_dict:
del state_dict["token_prefix"]
if "token_suffix" in state_dict:
del state_dict["token_suffix"]
print("Loading weights to {} " 'from "{}" (epoch = {})'.format(name, model_path, epoch))
# set strict=False
self._models[name].load_state_dict(state_dict, strict=False)
# Path: trainers/promptsrc.py
class PromptSRC(TrainerX):
def check_cfg(self, cfg):
assert cfg.TRAINER.PROMPTSRC.PREC in ["fp16", "fp32", "amp"]
def build_model(self):
cfg = self.cfg
classnames = self.dm.dataset.classnames
print(f"Loading CLIP (backbone: {cfg.MODEL.BACKBONE.NAME})")
clip_model = load_clip_to_cpu(cfg)
if cfg.TRAINER.PROMPTSRC.PREC == "fp32" or cfg.TRAINER.PROMPTSRC.PREC == "amp":
# CLIP's default precision is fp16
clip_model.float()
print("Building custom CLIP")
self.model = CustomCLIP(cfg, classnames, clip_model)
print("Turning off gradients in both the image and the text encoder")
name_to_update = "prompt_learner"
for name, param in self.model.named_parameters():
if name_to_update not in name:
# Make sure that VPT prompts are updated
if "VPT" in name:
param.requires_grad_(True)
else:
param.requires_grad_(False)
else:
if "ZS_image_encoder" in name:
param.requires_grad_(False)
# Double check
enabled = set()
for name, param in self.model.named_parameters():
if param.requires_grad:
enabled.add(name)
print(f"Parameters to be updated: {enabled}")
print(f"Parameters count: {len(enabled)}")
if cfg.MODEL.INIT_WEIGHTS:
load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)
self.model.to(self.device)
# NOTE: only give prompt_learner to the optimizer
self.optim = build_optimizer(self.model, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
self.register_model("VLPromptLearner", self.model, self.optim, self.sched)
# Gaussian epoch weights used for GPA (weight-space aggregation across epochs)
self.total_epochs = cfg.OPTIM.MAX_EPOCH
self.step_counter = 1
N = cfg.OPTIM.MAX_EPOCH
mean = cfg.TRAINER.PROMPTSRC.GPA_MEAN
stdev = cfg.TRAINER.PROMPTSRC.GPA_STD
gauss = self.get_gauss(mean, stdev)
self.gauss = np.array([gauss(a) for a in range(1, N + 1)])
self.gauss = self.gauss / sum(self.gauss)
self.scaler = GradScaler() if cfg.TRAINER.PROMPTSRC.PREC == "amp" else None
# Note that multi-gpu training could be slow because CLIP's size is
# big, which slows down the copy operation in DataParallel
device_count = torch.cuda.device_count()
if device_count > 1:
print(f"Multiple GPUs detected (n_gpus={device_count}), use all of them!")
self.model = nn.DataParallel(self.model)
# Keep model with GPA
self.previous_model_gpa = None
def forward_backward(self, batch):
image, label = self.parse_batch_train(batch)
model = self.model
optim = self.optim
scaler = self.scaler
prec = self.cfg.TRAINER.PROMPTSRC.PREC
if prec == "amp":
with autocast():
loss = model(image, label)
optim.zero_grad()
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
else:
loss_ce, normalized_text_features, zs_clip_text_embeddings, zs_image_embedd, image_ft, \
zero_shot_logits, logits = model(image, label)
# Calculate the L_SCL_text loss
loss_scl_text = F.l1_loss(normalized_text_features, zs_clip_text_embeddings.cuda(),
reduction='mean') * self.cfg.TRAINER.PROMPTSRC.TEXT_LOSS_WEIGHT
# Calculate the L_SCL_image loss
loss_scl_image = F.l1_loss(image_ft, zs_image_embedd.cuda(),
reduction='mean') * self.cfg.TRAINER.PROMPTSRC.IMAGE_LOSS_WEIGHT
# Now calculate L_SCL_logits
L_SCL_logits = F.kl_div(
F.log_softmax(logits / 1, dim=1),
F.log_softmax(zero_shot_logits / 1, dim=1),
reduction='sum',
log_target=True
) * (1 * 1) / logits.numel()
L_SCL = (L_SCL_logits + loss_scl_text + loss_scl_image)
loss = (loss_ce + L_SCL)
optim.zero_grad()
loss.backward()
optim.step()
loss_summary = {"loss": loss.item()}
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
# Means one epoch is completed, perform GPA
self.step_counter = self.step_counter + 1
current_epoch_weight = self.gauss[self.step_counter - 2]
current_model_weights = copy.deepcopy(model.state_dict())
weighted_state_dict = self.state_dict_weighting(current_model_weights, current_epoch_weight)
if self.previous_model_gpa is None:
self.previous_model_gpa = weighted_state_dict
else:
self.previous_model_gpa = self.state_dict_add(weighted_state_dict, self.previous_model_gpa)
if self.step_counter == self.model.total_epochs + 1:
print("Using GPA model for final inference...")
model.load_state_dict(self.previous_model_gpa)
self.model.load_state_dict(self.previous_model_gpa)
return loss_summary
def model_inference(self, input, feature=False):
return self.model(input, feature=feature)
def state_dict_weighting(self, main_dict, weightage, prompt_only=False):
# Average all parameters
updated_dict = copy.deepcopy(main_dict)
if not prompt_only:
for key in main_dict:
updated_dict[key] = main_dict[key] * weightage
return updated_dict
else:
return main_dict * weightage
def state_dict_add(self, dict1, dict2, prompt_only=False):
# Average all parameters
if not prompt_only:
modified_dict = dict2
for key in dict1:
modified_dict[key] = (modified_dict[key] + dict1[key])
return modified_dict
else:
return dict1 + dict2
def get_gauss(self, mu, sigma):
gauss = lambda x: (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
return gauss
def parse_batch_train(self, batch):
input = batch["img"]
label = batch["label"]
input = input.to(self.device)
label = label.to(self.device)
return input, label
def load_model(self, directory, epoch=None):
if not directory:
print("Note that load_model() is skipped as no pretrained model is given")
return
names = self.get_model_names()
# By default, the best model is loaded
model_file = "model-best.pth.tar"
if epoch is not None:
model_file = "model.pth.tar-" + str(epoch)
for name in names:
model_path = osp.join(directory, name, model_file)
if not osp.exists(model_path):
raise FileNotFoundError('Model not found at "{}"'.format(model_path))
checkpoint = load_checkpoint(model_path)
state_dict = checkpoint["state_dict"]
epoch = checkpoint["epoch"]
# Ignore fixed token vectors
if "prompt_learner.token_prefix" in state_dict:
del state_dict["prompt_learner.token_prefix"]
if "prompt_learner.token_suffix" in state_dict:
del state_dict["prompt_learner.token_suffix"]
print("Loading weights to {} " 'from "{}" (epoch = {})'.format(name, model_path, epoch))
# set strict=False
self._models[name].load_state_dict(state_dict, strict=False)
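# Illustrative aside, not part of the repository above: a minimal sketch of the Gaussian
# weight-space aggregation ("GPA") pattern the trainer above implements -- each epoch's
# weights are scaled by a normalized Gaussian coefficient and summed into a running
# aggregate that is loaded for final inference. Plain tensors stand in for full state dicts.
import numpy as np
import torch

max_epoch, mean, stdev = 20, 15.0, 1.0
gauss = lambda x: (1 / (stdev * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((x - mean) / stdev) ** 2)
coeffs = np.array([gauss(e) for e in range(1, max_epoch + 1)])
coeffs = coeffs / coeffs.sum()                       # normalized per-epoch weights

running = None
for epoch in range(max_epoch):
    epoch_weights = torch.randn(3)                   # stand-in for model.state_dict()
    weighted = coeffs[epoch] * epoch_weights         # cf. state_dict_weighting
    running = weighted if running is None else running + weighted  # cf. state_dict_add
print(running)                                       # aggregate used for final inference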
# Path: trainers/baseline_en.py
import os.path as osp
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.cuda.amp import GradScaler, autocast
from dassl.engine import TRAINER_REGISTRY, TrainerX
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import load_checkpoint
from .cocoop import CoCoOp
from .promptsrc import PromptSRC
self.promptsrc_vit32.load_model(
f"{cfg.TRAINER.ENLEARN.MODEL_DIR}/{cfg.MODEL.BACKBONE.NAME}/seed{cfg.SEED}",
epoch=20
)
self.promptsrc_vit32.model.eval()
# Load vit-16
cfg.MODEL.BACKBONE.NAME = "ViT-B/16"
print(f"Loading CLIP (backbone: {cfg.MODEL.BACKBONE.NAME})")
self.promptsrc_vit16 = PromptSRC(cfg)
self.promptsrc_vit16.load_model(
f"{cfg.TRAINER.ENLEARN.MODEL_DIR}/{cfg.MODEL.BACKBONE.NAME}/seed{cfg.SEED}",
epoch=20
)
self.promptsrc_vit16.model.eval()
# Weight generator
self.model = nn.Sequential(
nn.Linear(1024, 1024//cfg.TRAINER.ENLEARN.DOWNSCALE),
nn.ReLU(),
nn.Linear(1024//cfg.TRAINER.ENLEARN.DOWNSCALE, cfg.TRAINER.ENLEARN.NUM_WEIGHT)
).type(self.dtype)
self.model.to(self.device)
self.optim = build_optimizer(self.model, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
self.register_model("model", self.model, self.optim, self.sched)
self.scaler = GradScaler() if cfg.TRAINER.PROMPTSRC.PREC == "amp" else None
def forward_backward(self, batch):
image, label = self.parse_batch_train(batch)
model = self.model
optim = self.optim
scaler = self.scaler
with torch.no_grad():
logits_vit32, feat_vit32 = self.promptsrc_vit32.model_inference(image, feature=True)
logits_vit16, feat_vit16 = self.promptsrc_vit16.model_inference(image, feature=True)
prec = self.cfg.TRAINER.PROMPTSRC.PREC
if prec == "amp":
with autocast():
# Weight generation
feat_list = [feat_vit32, feat_vit16]
feat_cat = torch.cat(feat_list, dim=1)
weights = model(feat_cat).unsqueeze(2) # (B, 2, 1)
logits_cat = torch.cat(
[logits_vit32.unsqueeze(1),
logits_vit16.unsqueeze(1)], dim=1) # (B, 2, 500)
logits = torch.sum(logits_cat * weights, dim=1)
loss = F.cross_entropy(logits, label)
optim.zero_grad()
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
else:
# Weight generation
feat_list = [feat_vit32, feat_vit16]
feat_cat = torch.cat(feat_list, dim=1)
weights = model(feat_cat).unsqueeze(2) # (B, 2, 1)
logits_cat = torch.cat(
[logits_vit32.unsqueeze(1),
logits_vit16.unsqueeze(1)], dim=1) # (B, 2, 500)
logits = torch.sum(logits_cat * weights, dim=1)
loss = F.cross_entropy(logits, label)
optim.zero_grad()
loss.backward()
optim.step()
loss_summary = {"loss": loss.item()}
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
return loss_summary
def parse_batch_train(self, batch):
input = batch["img"]
label = batch["label"]
input = input.to(self.device)
label = label.to(self.device)
return input, label
def model_inference(self, image):
with torch.no_grad():
logits_vit32, feat_vit32 = self.promptsrc_vit32.model_inference(image, feature=True)
logits_vit16, feat_vit16 = self.promptsrc_vit16.model_inference(image, feature=True)
# Weight generation
feat_list = [feat_vit32, feat_vit16]
feat_cat = torch.cat(feat_list, dim=1)
weights = self.model(feat_cat).unsqueeze(2) # (B, 2, 1)
logits_cat = torch.cat(
[logits_vit32.unsqueeze(1),
logits_vit16.unsqueeze(1)], dim=1) # (B, 2, 500)
logits = torch.sum(logits_cat * weights, dim=1)
return logits
def load_model(self, directory, epoch=None):
if not directory:
print("Note that load_model() is skipped as no pretrained model is given")
return
names = self.get_model_names()
# By default, the best model is loaded
model_file = "model-best.pth.tar"
| if epoch is not None: |
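# Illustrative aside, not part of the example above: a self-contained sketch of the
# instance-wise ensembling that baseline_en.py implements -- concatenated image features
# feed a small MLP that outputs one weight per backbone, and the per-backbone logits are
# combined by a weighted sum. All dimensions below are invented for illustration.
import torch
import torch.nn as nn

batch, feat_dim, num_classes, num_models = 4, 512, 10, 2
weight_gen = nn.Sequential(
    nn.Linear(num_models * feat_dim, 64),
    nn.ReLU(),
    nn.Linear(64, num_models),
)

feats = [torch.randn(batch, feat_dim) for _ in range(num_models)]      # per-backbone features
logits = [torch.randn(batch, num_classes) for _ in range(num_models)]  # per-backbone logits

weights = weight_gen(torch.cat(feats, dim=1)).unsqueeze(2)   # (B, num_models, 1)
logits_cat = torch.stack(logits, dim=1)                      # (B, num_models, C)
ensembled = (logits_cat * weights).sum(dim=1)                # (B, C)
print(ensembled.shape)  # torch.Size([4, 10])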
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: armed-gpt/gpt-blazing
# Path: gpt_blazing/model/interface.py
class Role(Enum):
SYSTEM = 'system'
USER = 'user'
ASSISTANT = 'assistant'
@classmethod
def from_string(cls, text: str):
return _TEXT_TO_ROLE[text]
# Path: gpt_blazing/model/baichuan2/model.py
class Baichuan2Model(torch.nn.Module):
def __init__(self, config: Baichuan2ModelConfig) -> None:
super().__init__()
self.config = config
self.apply_nan_to_num_to_alibi_mask = config.apply_nan_to_num_to_alibi_mask
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
# [num_heads, model_max_length, model_max_length]
# TODO: dtype issue here.
self.register_buffer(
"alibi_mask",
_gen_alibi_mask(config.num_attention_heads, config.model_max_length),
persistent=False,
)
self.layers = torch.nn.ModuleList([
BaichuanLayer(config) for _ in range(config.num_hidden_layers)
])
self.norm = RMSNorm(config.hidden_size, epsilon=config.rms_norm_eps)
self.lm_head = NormHead(config.hidden_size, config.vocab_size)
def half(self):
self = super().half()
if self.apply_nan_to_num_to_alibi_mask:
self.alibi_mask.nan_to_num_()
return self
def bfloat16(self):
self = super().bfloat16()
if self.apply_nan_to_num_to_alibi_mask:
self.alibi_mask.nan_to_num_()
return self
def forward(
self,
input_pos: torch.Tensor,
end: int,
input_ids: torch.Tensor,
):
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
for layer in self.layers:
hidden_states = layer(
input_pos=input_pos,
end=end,
hidden_states=hidden_states,
attention_mask=self.alibi_mask,
)
hidden_states = self.norm(hidden_states)
logits = self.lm_head(hidden_states)
return logits, hidden_states
# Path: gpt_blazing/model/baichuan2/model.py
class Baichuan2ModelConfig:
hidden_size: int = 5120
initializer_range: float = 0.02
intermediate_size: int = 13696
model_max_length: int = 4096
model_max_batch_size: int = 1
num_attention_heads: int = 40
num_hidden_layers: int = 40
pad_token_id: int = 0
rms_norm_eps: float = 1e-06
vocab_size: int = 125696
use_original_attn_impl: bool = True
apply_nan_to_num_to_alibi_mask: bool = False
debug: bool = False
# Path: gpt_blazing/model/baichuan2/model.py
def quantize_int8(model: Baichuan2Model, struct_only: bool = False):
replace_linear_weight_only_int8_per_channel(model, struct_only)
return model
# Path: gpt_blazing/model/baichuan2/model.py
def quantize_fp8(model: Baichuan2Model, struct_only: bool = False):
replace_linear_weight_only_fp8_per_channel(model, struct_only)
return model
# Path: gpt_blazing/model/baichuan2/model.py
class EmptyInitOnDevice(torch.overrides.TorchFunctionMode): # type: ignore
def __init__(self, device=None): # type: ignore
self.device = device
def __torch_function__(self, func, types, args=(), kwargs=None): # type: ignore
kwargs = kwargs or {}
if getattr(func, '__module__', None) == 'torch.nn.init':
if 'tensor' in kwargs:
return kwargs['tensor']
else:
return args[0]
device_constructors = torch.utils._device._device_constructors() # type: ignore
if (
self.device is not None and func in device_constructors and kwargs.get('device') is None
):
kwargs['device'] = self.device
return func(*args, **kwargs)
# Path: gpt_blazing/model/baichuan2/model.py
def load_model(
model_pt: str,
config: Optional[Baichuan2ModelConfig] = None,
int8: bool = True,
fp8: bool = False,
):
if config is None:
config = Baichuan2ModelConfig()
with EmptyInitOnDevice():
model = Baichuan2Model(config)
model.eval()
model.bfloat16()
if int8:
model = quantize_int8(model, struct_only=True)
elif fp8:
model = quantize_fp8(model, struct_only=True)
model.load_state_dict(torch.load(model_pt, map_location='cpu'))
return model
# Path: gpt_blazing/model/baichuan2/model.py
def model_prefill_2048(
model: Baichuan2Model,
input_pos: torch.Tensor,
input_ids: torch.Tensor,
):
return model(input_pos=input_pos, end=2048, input_ids=input_ids)
# Path: gpt_blazing/model/baichuan2/model.py
def model_prefill_4096(
model: Baichuan2Model,
input_pos: torch.Tensor,
input_ids: torch.Tensor,
):
return model(input_pos=input_pos, end=4096, input_ids=input_ids)
# Path: gpt_blazing/model/baichuan2/model.py
def compile_model_prefill(func): # type: ignore
return torch.compile(func, fullgraph=True, dynamic=True)
# Path: gpt_blazing/model/baichuan2/model.py
def model_decode_one_token_2048(
model: Baichuan2Model,
input_pos: torch.Tensor,
input_ids: torch.Tensor,
):
return model(input_pos=input_pos, end=2048, input_ids=input_ids)
# Path: gpt_blazing/model/baichuan2/model.py
def model_decode_one_token_4096(
model: Baichuan2Model,
input_pos: torch.Tensor,
input_ids: torch.Tensor,
):
return model(input_pos=input_pos, end=4096, input_ids=input_ids)
# Path: gpt_blazing/model/baichuan2/model.py
def compile_model_decode_one_token(func): # type: ignore
return torch.compile(func, mode="reduce-overhead", fullgraph=True)
# Path: gpt_blazing/model/baichuan2/model.py
def model_dispatch(
model: Baichuan2Model,
func_2048: Any,
func_4096: Any,
input_pos: torch.Tensor,
input_ids: torch.Tensor,
):
if func_2048 is None:
func = func_4096
else:
if input_pos[-1] < 2048:
func = func_2048
else:
func = func_4096
# https://github.com/pytorch-labs/gpt-fast/issues/31
with torch.inference_mode():
with torch.backends.cuda.sdp_kernel(
enable_flash=False,
enable_mem_efficient=False,
enable_math=True,
):
logits, hidden_states = func(model, input_pos, input_ids)
logits = logits.detach()
hidden_states = hidden_states.detach()
return logits, hidden_states
# Path: gpt_blazing/model/baichuan2/model.py
def model_get_cache(
model: Baichuan2Model,
length: int,
device: Optional[str] = None,
):
attn_cache: List[Tuple[torch.Tensor, torch.Tensor]] = []
for layer in model.layers:
k_cache = layer.self_attn.k_cache[:, :, :length].clone()
v_cache = layer.self_attn.v_cache[:, :, :length].clone()
if device:
k_cache = k_cache.to(device, non_blocking=True)
v_cache = v_cache.to(device, non_blocking=True)
attn_cache.append((k_cache, v_cache))
return attn_cache
# Path: gpt_blazing/model/baichuan2/model.py
def model_set_cache(
model: Baichuan2Model,
length: int,
attn_cache: Sequence[Tuple[torch.Tensor, torch.Tensor]],
):
assert len(model.layers) == len(attn_cache)
for layer, (k_cache, v_cache) in zip(model.layers, attn_cache):
layer.self_attn.k_cache[:, :, :length] = k_cache.to(
layer.self_attn.k_cache.device,
non_blocking=True,
)
layer.self_attn.v_cache[:, :, :length] = v_cache.to(
layer.self_attn.v_cache.device,
non_blocking=True,
)
# Path: gpt_blazing/model/baichuan2/utility.py
def convert_hf_model_to_model(hf_model: Any):
import torch
with EmptyInitOnDevice():
model = Baichuan2Model(Baichuan2ModelConfig(debug=True))
model.bfloat16()
assert hf_model.dtype == torch.bfloat16 # type: ignore
baichuan_model = hf_model.model
model.embed_tokens.load_state_dict(baichuan_model.embed_tokens.state_dict())
for layer_idx, layer in enumerate(model.layers):
layer.load_state_dict(baichuan_model.layers[layer_idx].state_dict())
model.norm.load_state_dict(baichuan_model.norm.state_dict())
model.lm_head.load_state_dict(hf_model.lm_head.state_dict())
return model
# Path: gpt_blazing/model/baichuan2/tokenizer.py
class Baichuan2Tokenizer:
def __init__(self, model_file: str) -> None:
self.sp_model = SentencePieceProcessor()
self.sp_model.Load(model_file)
self.eos_token_id = 2
self.user_token_id = 195
self.assistant_token_id = 196
def tokenize(self, text: str) -> Sequence[int]:
return self.sp_model.tokenize(text) # type: ignore
def chat_tokenize(self, rounds: Sequence[Tuple[Role, str]]):
input_ids = []
system = None
if rounds[0][0] == Role.SYSTEM:
system = rounds[0][1]
input_ids.extend(self.tokenize(system))
rounds = rounds[1:]
num_system_tokens = len(input_ids)
for role, text in rounds:
if role == Role.USER:
input_ids.append(self.user_token_id)
elif role == Role.ASSISTANT:
input_ids.append(self.assistant_token_id)
else:
raise NotImplementedError()
input_ids.extend(self.tokenize(text))
assert rounds[-1][0] == Role.USER
input_ids.append(self.assistant_token_id)
return input_ids, system, num_system_tokens
def decode(self, tokens: Sequence[int]) -> str:
return self.sp_model.decode(tokens) # type: ignore
# Path: gpt_blazing/model/baichuan2/inference.py
class Baichuan2ModelInferenceConfig:
model_folder: str
model_config: Baichuan2ModelConfig = attrs.field(factory=Baichuan2ModelConfig)
quantization_mode: QuantizationMode = QuantizationMode.INT8
device: str = 'cuda:0'
cache_capacity: int = 20
use_dynamic_dispatch: bool = True
skip_torch_compile: bool = False
# Path: gpt_blazing/model/baichuan2/inference.py
class Baichuan2ModelInference(ModelInference[Baichuan2ModelInferenceConfig]):
def __init__(
self,
config: Baichuan2ModelInferenceConfig,
func_process_model: Optional[Callable[[Any], None]] = None,
):
super().__init__(config, func_process_model)
self.device = config.device
self.model_max_length = 4096
# For cache.
self.cached_system: Optional[str] = None
self.lru_cache = LruCache(config.cache_capacity)
self.model_is_loaded = False
self.model_is_compiled = False
def load_model(self, device: Optional[str] = None) -> None:
if device:
self.device = device
logger.info(
f'Initializing Baichuan2Inference(config={self.config}), '
f'device={self.device}'
)
model_fd = io.folder(self.config.model_folder, exists=True)
# TODO: support more modes.
assert self.config.quantization_mode == QuantizationMode.INT8
model_pt = str(model_fd / f'{self.config.quantization_mode.value}.pt')
logger.info(f'Loading model_pt={model_pt}')
self.model = load_model(model_pt=model_pt, config=self.config.model_config, int8=True)
logger.info('Model loaded.')
tokenizer_model = str(model_fd / 'tokenizer.model')
logger.info(f'Loading tokenizer_model={tokenizer_model}')
self.tokenizer = Baichuan2Tokenizer(tokenizer_model)
logger.info('Tokenizer loaded.')
logger.info(f'Moving model to device={self.device}')
self.model = self.model.to(self.device)
if self.func_process_model is not None:
logger.info('func_process_model is set, calling func_process_model(self.model)...')
self.func_process_model(self.model)
self.model_is_loaded = True
def compile_model(self) -> None:
assert self.model_is_loaded
if self.config.skip_torch_compile:
logger.info('skip_torch_compile is set, abort. (only for debugging)')
self.prefill_4096 = model_prefill_4096
self.decode_one_token_4096 = model_decode_one_token_4096
self.prefill_2048 = None
self.decode_one_token_2048 = None
self.model_is_compiled = True
return
logger.info('Compiling model...')
self.prefill_4096 = compile_model_prefill(model_prefill_4096)
self.decode_one_token_4096 = compile_model_decode_one_token(model_decode_one_token_4096)
self.prefill_2048 = None
self.decode_one_token_2048 = None
if self.config.use_dynamic_dispatch:
self.prefill_2048 = compile_model_prefill(model_prefill_2048)
self.decode_one_token_2048 = compile_model_decode_one_token(model_decode_one_token_2048)
self.trigger_model_compilation()
self.model_is_compiled = True
def model_is_ready(self) -> bool:
return self.model_is_compiled
def trigger_model_compilation(self):
import torch._dynamo.config
import torch._inductor.config
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True
logger.info('Trigger prefill compilation.')
input_ids = torch.tensor([self.tokenizer.tokenize('随便写点什么')], dtype=torch.int)
input_ids = input_ids.to(self.device)
for offset in [0, 2048]:
logger.info(f'offset={offset}')
for idx in range(5):
input_pos = torch.arange(
offset,
offset + int(input_ids.shape[1]),
device=input_ids.device,
dtype=torch.int,
)
_, num_seconds = timed(
lambda: model_dispatch(
model=self.model,
func_2048=self.prefill_2048,
func_4096=self.prefill_4096,
input_pos=input_pos,
input_ids=input_ids,
)
)
logger.info(f'[{idx}]: prefill compilation: {num_seconds}s.')
logger.info('Trigger decode_one_token compilation.')
for offset in [0, 2048]:
logger.info(f'offset={offset}')
for idx in range(5):
input_pos = torch.tensor([offset + idx], device=self.device, dtype=torch.int)
input_ids = torch.tensor(
[[random.randint(0, self.config.model_config.vocab_size)]],
dtype=torch.int,
device=self.device,
)
_, num_seconds = timed(
lambda: model_dispatch(
model=self.model,
func_2048=self.decode_one_token_2048,
func_4096=self.decode_one_token_4096,
input_pos=input_pos,
input_ids=input_ids,
)
)
logger.info(f'[{idx}]: decode_one_token compilation: {num_seconds}s.')
def get_eos_token(self):
return self.tokenizer.eos_token_id
def get_model_max_length(self):
return self.model_max_length
def get_hidden_size(self):
return self.config.model_config.hidden_size
def model_prefill(self, rounds: Sequence[Tuple[Role, str]], cache_system: bool = False):
input_ids = None
system = None
num_system_tokens = 0
begin = 0
initialized = False
if cache_system:
system = None
if rounds[0][0] == Role.SYSTEM:
system = rounds[0][1]
if system:
cache = self.lru_cache.get(system)
if cache is not None:
num_system_tokens, attn_cache = cache
# Cache hit.
if system != self.cached_system:
# Need to move the cache to model.
model_set_cache(self.model, num_system_tokens, attn_cache)
self.cached_system = system
# Skip tokenizing system.
input_ids, _, _num_system_tokens = self.tokenizer.chat_tokenize(rounds[1:])
assert _num_system_tokens == 0
begin = num_system_tokens
initialized = True
if not initialized:
input_ids, system, num_system_tokens = self.tokenizer.chat_tokenize(rounds)
# Invalidate the model cache.
self.cached_system = None
assert input_ids
end = begin + len(input_ids)
if end >= self.model_max_length:
return None
input_pos = torch.arange(begin, end, device=self.device, dtype=torch.int)
input_ids = torch.tensor([input_ids], dtype=torch.int, device=self.device)
logits, hidden_states = model_dispatch(
model=self.model,
func_2048=self.prefill_2048,
func_4096=self.prefill_4096,
input_pos=input_pos,
input_ids=input_ids,
)
if cache_system and system and self.cached_system is None:
# Add to cache.
self.cached_system = system
self.lru_cache.set(
system,
(num_system_tokens, model_get_cache(self.model, num_system_tokens)),
)
return logits, hidden_states, end
def model_decode_one_token(self, input_pos: torch.Tensor, input_ids: torch.Tensor):
logits, hidden_states = model_dispatch(
model=self.model,
func_2048=self.decode_one_token_2048,
func_4096=self.decode_one_token_4096,
input_pos=input_pos,
input_ids=input_ids,
)
return logits, hidden_states
def tokenizer_decode(self, tokens: Sequence[int]):
return self.tokenizer.decode(tokens)
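# Illustrative aside, not part of the repository above: a tiny sketch of the Baichuan2 chat
# token layout that chat_tokenize() builds -- system text first (no marker), then each round
# prefixed by the reserved user (195) / assistant (196) ids, ending with the assistant id so
# generation starts from the assistant turn. A toy character-level "tokenizer" stands in
# for SentencePiece here.
USER_TOKEN_ID, ASSISTANT_TOKEN_ID = 195, 196
toy_tokenize = lambda text: [ord(ch) for ch in text]   # placeholder tokenizer

rounds = [("system", "be brief"), ("user", "hi"), ("assistant", "hello"), ("user", "bye?")]
input_ids = list(toy_tokenize(rounds[0][1]))           # system tokens, no role marker
for role, text in rounds[1:]:
    input_ids.append(USER_TOKEN_ID if role == "user" else ASSISTANT_TOKEN_ID)
    input_ids.extend(toy_tokenize(text))
input_ids.append(ASSISTANT_TOKEN_ID)                   # model continues as the assistant
print(input_ids)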
# Path: gpt_blazing_experiment/model/debug_baichuan2.py
from typing import Tuple, Sequence, Optional
from datetime import datetime
from gpt_blazing.model.interface import Role
from gpt_blazing.model.baichuan2.model import (
Baichuan2Model,
Baichuan2ModelConfig,
quantize_int8,
quantize_fp8,
EmptyInitOnDevice,
load_model,
model_prefill_2048,
model_prefill_4096,
compile_model_prefill,
model_decode_one_token_2048,
model_decode_one_token_4096,
compile_model_decode_one_token,
model_dispatch,
model_get_cache,
model_set_cache,
)
from gpt_blazing.model.baichuan2.utility import convert_hf_model_to_model
from gpt_blazing.model.baichuan2.tokenizer import Baichuan2Tokenizer
from gpt_blazing.model.baichuan2.inference import (
Baichuan2ModelInferenceConfig,
Baichuan2ModelInference,
)
from transformers import AutoModelForCausalLM
from transformers import AutoTokenizer
from transformers.generation.utils import GenerationConfig
import torch
import torch.nn.functional as F
import sentencepiece as spm
import iolite as io
import os
import random
import torch._dynamo.config
import torch._inductor.config
BAICHUAN2_13B_MODEL_FOLDER = str(
io.folder(
'$GPT_BLAZING_DATA/base/Baichuan2-13B-Chat',
expandvars=True,
)
)
def load_hf_model(
model_folder: str = BAICHUAN2_13B_MODEL_FOLDER,
device_map: Optional[str] = None,
):
return AutoModelForCausalLM.from_pretrained(
model_folder,
torch_dtype=torch.bfloat16,
trust_remote_code=True,
device_map=device_map,
)
def eval_hf_model():
with EmptyInitOnDevice():
| model = load_hf_model() |
====REPOSITORY====
# Repo Name: tmllab/Machine_Vision_Therapy
# Path: mydatasets/utils/ina.py
# Path: mydatasets/utils/inr.py
# Path: mydatasets/utils/inv.py
# Path: myeva/eva_clip.py
def build_eva_model_and_transforms(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
force_quick_gelu: bool = False,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
):
model = create_model(
model_name, pretrained, precision, device,
force_quick_gelu=force_quick_gelu)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess_val = image_transform(model.visual.image_size, mean=image_mean, std=image_std)
return model, preprocess_val
# Path: fine_tuning.py
import os
import sys
import json
import time
import copy
import torch
import myclip
import pickle
import argparse
import mydatasets
import random
import numpy as np
import templates as templates
import torch.backends.cudnn as cudnn
from tqdm import tqdm
from PIL import Image
from torch import optim
from timm.utils import accuracy, AverageMeter
from torchvision import transforms, datasets
from torch.utils.data import Dataset, DataLoader
from mydatasets.utils.ina import imagenet_a_mask
from mydatasets.utils.inr import imagenet_r_mask
from mydatasets.utils.inv import imagenet_v_mask
from myeva.eva_clip import build_eva_model_and_transforms
from torchvision.datasets import CIFAR10, MNIST, CIFAR100
from .utils import AverageMeter, create_logger, create_scheduler
clip_model, preprocess = myclip.load('RN50')
clip_model = clip_model.to('cuda:1', dtype=torch.bfloat16)
# create dataset
if args.dataset=='domainbed':
dataset = MVTDataset(meta_file, preprocess)
train_dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
val_dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
else:
if args.dataset=='cifar10' or args.dataset=='cifar100':
dataset = MVTCifarDataset(meta_file, preprocess)
elif args.dataset=='mnist':
dataset = MVTMnistDataset(root=meta_file, transform=preprocess)
else:
dataset = MVTDataset(args, meta_file, preprocess)
train_dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
val_dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
text_template_mapping = {
'mnist': 'mnist_template',
'cifar10': 'cifar_template',
'cifar100': 'cifar_template',
'iwildcam': 'iwildcam_template',
'imagenet': 'openai_imagenet_template',
'domainbed': 'cifar_template',
}
templates = getattr(templates, text_template_mapping[args.dataset])
get_image_text_loader_fn = getattr(mydatasets, 'get_' + args.dataset)
_, _, _, class_names = get_image_text_loader_fn(args, preprocess, return_classnames=True)
zeroshot_weights = zeroshot_classifier(clip_model, class_names, templates)
zeroshot_weights=zeroshot_weights.to(torch.float32)
clip_model=clip_model.visual
clip_model=clip_model.to(torch.float32)
clip_model.eval()
# create teacher clip_model and logits
teacher_model=copy.deepcopy(clip_model)
teacher_model.eval()
for k, v in teacher_model.named_parameters():
v.requires_grad = False
with open(os.path.join('logits/', teacher_json), 'r') as f:
logits_json=json.load(f)
# Format of teacher_logits:
# image_paths: [[topk_image_labels], [topk_image_logits]]
teacher_logits={}
for tmp in logits_json['logits']:
key_dir=list(tmp.keys())[0]
teacher_logits[key_dir]=tmp[key_dir]
# optimizer
optim_kwargs={
# 'lr': 1e-6,
'lr': 5e-7,
'betas': (0.9, 0.999),
'eps': 1e-8,
'weight_decay': 5e-7,
}
optimizer = optim.Adam(params= filter(lambda p: p.requires_grad, clip_model.parameters()), **optim_kwargs)
# scheduler
logger.info('Initialize scheduler.')
updates_per_epoch = len(train_dataloader)
sched_kwargs={
'sched': 'cosine',
# 'num_epochs': 10,
'num_epochs': 3,
'warmup_epochs': 0,
'min_lr': 5e-8,
'step_on_epochs': False,
'updates_per_epoch': updates_per_epoch
}
scheduler, num_epochs = create_scheduler(optimizer, **sched_kwargs)
# evaluate first
val_acc = evaluation_epoch(args, -1, num_epochs, val_dataloader, mask, zeroshot_weights, clip_model, logger)
logger.info(f'First evaluation, acc is {val_acc}.')
# training epochs
for epoch in range(num_epochs):
# clip_model training
train_loss, batch_time = train_epoch(args, epoch, num_epochs, train_dataloader, mask, zeroshot_weights, clip_model, teacher_model, teacher_logits, optimizer, scheduler, logger)
# clip_model evaluation
val_acc = evaluation_epoch(args, epoch, num_epochs, val_dataloader, mask, zeroshot_weights, clip_model, logger)
# record loss
logger.info(f'Finish training epoch {epoch}. The train loss is {train_loss}, the val acc is {val_acc}, the batch time is {batch_time}.')
# save state dict
checkpoint={'clip_model': clip_model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'lr': scheduler.state_dict()}
torch.save(checkpoint, os.path.join(args.output_dir, f'save{epoch}.pth'))
logger.info('Finish training.')
def get_logits(images, zeroshot_weights, teacher_model):
with torch.no_grad():
image_features = teacher_model(images)
image_features = image_features/image_features.norm(dim=-1, keepdim=True)
logits = image_features @ zeroshot_weights
return logits
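# --- Editor's note: hedged sketch, not from the repository. ---
# The surrounding code prepares a frozen teacher model and cached teacher logits; a
# common way to use them is a distillation loss that mixes hard-label cross-entropy
# with a temperature-softened KL term against the teacher. `temperature` and `alpha`
# are illustrative assumptions, not values taken from this repo.
import torch.nn.functional as F  # torch itself is already imported at the top of this file

def distillation_loss(student_logits, teacher_logits, targets, temperature=2.0, alpha=0.5):
    hard = F.cross_entropy(student_logits, targets)              # supervised term
    soft = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature ** 2)                                       # soft-target term
    return alpha * hard + (1.0 - alpha) * soft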
def train_epoch(args, epoch, num_epochs, train_dataloader, mask, zeroshot_weights, clip_model, teacher_model, teacher_logits, optimizer, scheduler, logger):
# settings
num_updates=epoch*len(train_dataloader)
train_loss_m = AverageMeter()
batch_time_m = AverageMeter()
timer = time.time()
crossentropy=torch.nn.CrossEntropyLoss()
log_scale=torch.ones([]) * np.log(1 / 0.07)
    log_scale = log_scale.cuda()  # Tensor.cuda() is not in-place; keep the returned tensor
# to train mode
clip_model.train()
for batch_id, data in enumerate(train_dataloader):
| images, targets, image_paths = data[0], data[1], data[2] |
====REPOSITORY====
# Repo Name: hubo0417/EasyGC
# Path: sdxl/generate_style_config.py
# Path: sdxl/sd_refiner_model.py
class SD_Refiner_Model:
model_id: str = BASE_CONFIG["sdxl_refiner_path"]
export: bool = True
single_lock = RLock()
is_cuda: bool = False
n_steps: int = 50
high_noise_frac: float = 0.8
    guidance_scale: float = 9  # lower values push the result toward an abstract, painterly look
    is_combine_base: bool = True
    H: int = 1024  # image height
    W: int = 1024  # image width
def __init__(self, **kwargs) -> None:
self.init_params(**kwargs)
self.base_model: SD_Base_Model = None
self.refiner_model = None
def init_params(self, **kwargs):
if "model_id" in kwargs:
self.model_id = kwargs["model_id"]
if "is_cuda" in kwargs:
self.is_cuda = bool(kwargs["is_cuda"])
if "n_steps" in kwargs:
self.n_steps = int(kwargs["n_steps"])
if self.n_steps > 100 or self.n_steps < 30:
self.n_steps = 40
if "guidance_scale" in kwargs:
self.guidance_scale = float(kwargs["guidance_scale"])
if self.guidance_scale > 8:
self.guidance_scale = 7.5
if "is_combine_base" in kwargs:
self.is_combine_base = bool(kwargs["is_combine_base"])
if "H" in kwargs:
self.H = int(kwargs["H"])
if self.H > 1024 or self.H < 128:
self.H = 1024
if "H" in kwargs:
self.W = int(kwargs["W"])
if self.W > 1024 or self.W < 128:
self.W = 1024
    # Produce refined images from a base image plus a single prompt
def get_image_to_image_single_prompt(self,
query: str,
image_url: str = None,
image_count: int = 1,
negative_prompt: str = None):
        # Get latent-space images from a single prompt
def _get_base_latent_images_single_prompt(query: str,
image_count: int = 1,
negative_prompt: str = None):
if self.base_model is not None:
images = self.base_model.get_base_latent_image_single_prompt(
query=query,
image_count=image_count,
negative_prompt=negative_prompt)
return images
else:
return None
target_size: tuple[int, int] = (self.H, self.W)
if image_url is None and self.is_combine_base is True:
init_images = _get_base_latent_images_single_prompt(
query, image_count, negative_prompt)
images = self.refiner_model(prompt=query,
num_inference_steps=self.n_steps,
denoising_start=self.high_noise_frac,
num_images_per_prompt=image_count,
negative_prompt=negative_prompt,
image=init_images,
target_size=target_size).images
else:
init_image = load_image(image_url).convert("RGB")
images = self.refiner_model(prompt=query,
image=init_image,
num_inference_steps=self.n_steps,
guidance_scale=self.guidance_scale,
negative_prompt=negative_prompt,
num_images_per_prompt=image_count,
target_size=target_size).images
return images
    # Produce refined images from a base image plus multiple prompts
def get_image_to_image_multiple_prompts(self,
prompts: List[str],
image_count: int = 1,
negative_prompt: str = None):
target_size: tuple[int, int] = (self.H, self.W)
        # Get latent-space images from multiple prompts
def _get_base_latent_image_multiple_prompts(
prompts: List[str],
image_count: int = 1,
negative_prompt: str = None):
if self.base_model is not None:
images = self.base_model.get_base_latent_image_multiple_prompts(
prompts=prompts,
image_count=image_count,
negative_prompt=negative_prompt)
return images
else:
return None
if self.is_combine_base is True:
negative_prompts: List[str] = []
for item in prompts:
negative_prompts.append(negative_prompt)
init_images = _get_base_latent_image_multiple_prompts(
prompts=prompts,
image_count=image_count,
negative_prompt=negative_prompt)
images = self.refiner_model(
prompt=prompts,
num_inference_steps=self.n_steps,
denoising_start=self.high_noise_frac,
num_images_per_prompt=image_count,
image=init_images,
target_size=target_size,
negative_prompt=negative_prompts).images
else:
raise ValueError("REFINER模型并未定义成需要和BASE模型一起使用")
return images
def unload_model(self):
if self.refiner_model is not None:
self.refiner_model = None
if self.base_model is not None:
if self.base_model.base_model is not None:
self.base_model.base_model = None
torch.cuda.empty_cache()
def load_model(self):
self.unload_model()
if self.is_combine_base is True:
            # Set up the base model
if self.base_model is None:
self.base_model = SD_Base_Model.instance(
n_steps=self.n_steps,
high_noise_frac=self.high_noise_frac,
is_cuda=self.is_cuda,
H=self.H / 2,
W=self.W / 2)
if self.base_model.base_model is None:
self.base_model.load_model()
            # Set up the refiner model
if self.refiner_model is None:
self.refiner_model = DiffusionPipeline.from_pretrained(
self.model_id,
torch_dtype=torch.float16,
variant="fp16",
use_safetensors=True,
text_encoder_2=self.base_model.base_model.text_encoder_2,
vae=self.base_model.base_model.vae)
else:
self.base_model = None
if self.refiner_model is None:
self.refiner_model = StableDiffusionXLImg2ImgPipeline.from_pretrained(
self.model_id,
torch_dtype=torch.float16,
variant="fp16",
use_safetensors=True)
if self.is_cuda is True:
self.refiner_model.to("cuda")
else:
self.refiner_model.enable_model_cpu_offload()
@classmethod
def instance(cls, *args, **kwargs):
if not hasattr(SD_Refiner_Model, "_instance"):
with SD_Refiner_Model.single_lock:
if not hasattr(SD_Refiner_Model, "_instance"):
SD_Refiner_Model._instance = cls(*args, **kwargs)
else:
SD_Refiner_Model._instance.init_params(**kwargs)
return SD_Refiner_Model._instance
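# --- Editor's note: hedged usage sketch, not part of the original file. ---
# It shows the intended call order for the singleton refiner: fetch the instance,
# load the weights, run an image-to-image pass, then release GPU memory. The prompt
# text and the local image paths are illustrative assumptions.
if __name__ == "__main__":
    refiner = SD_Refiner_Model.instance(is_combine_base=False, is_cuda=True)
    refiner.load_model()
    try:
        images = refiner.get_image_to_image_single_prompt(
            query="a watercolor landscape",      # illustrative prompt
            image_url="example_input.jpg",       # hypothetical local file
            image_count=1,
        )
        images[0].save("example_refined.jpg")    # hypothetical output path
    finally:
        refiner.unload_model()                   # free CUDA memory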
# Path: utils/image_search/image_search_util_txt.py
class Image_Search_Util_Txt:
search_count_from_google: int = 10
search_count_from_embedding_db: int = 2
collection_name = "images_blip_source"
destination: str = "D:\\EasyGC\\images_download\\blips"
return_top_n = 0.25
def __init__(self, **kwargs) -> None:
if "search_count_from_google" in kwargs:
self.search_count_from_google = int(
kwargs["search_count_from_google"])
if "search_count_from_embedding_db" in kwargs:
self.search_count_from_embedding_db = int(
kwargs["search_count_from_embedding_db"])
if "return_top_n" in kwargs:
self.return_top_n = float(kwargs["return_top_n"])
    # 1. When searching by image, extract a text caption (BLIP) of the image
def blip_image(self, image_url: Union[list, str], prefix: str = None):
if not image_url:
raise ValueError("未能获取到对应图片地址,此参数不能为空")
blip_model = Blip_Model()
result: list = []
if isinstance(image_url, list):
for item in image_url:
blip_str = blip_model.generate_text_from_image(image_url=item,
text=prefix)
result.append(blip_str)
else:
blip_str = blip_model.generate_text_from_image(image_url=image_url,
text=prefix)
result.append(blip_str)
return result
    # 1/2. Search the vector database with the text / image caption
def search_embedding_by_text(self, text: str):
final_data: list = None
embeddings = EmbeddingHelper(collection_name=self.collection_name)
where_document = {"$contains": text}
search_result = embeddings.query(
message=text,
count=self.search_count_from_embedding_db,
is_find_metedata=True,
filter={"source": "images"},
where_document=where_document)
if search_result and len(search_result) > 0:
final_data = [item["image_path"] for item in search_result]
return final_data
    # 3. Search the web using the text / image caption
def search_image_by_google(self, text: str) -> list[dict]:
def download_images(image_urls: list):
images = []
for url in image_urls:
try:
image_name = str(uuid.uuid4())
local_path = f"{self.destination}\\{image_name}.jpg"
response = requests.get(url, stream=True)
response.raise_for_status()
with open(local_path, 'wb') as file:
for chunk in response.iter_content(chunk_size=8192):
file.write(chunk)
images.append(local_path)
except requests.exceptions.RequestException as e:
continue
return images
base_url = "https://www.googleapis.com/customsearch/v1"
_params = f"key={GOOGLE_APIKEY}&cx={GOOGLE_SEARCH_ID}"
content = []
system_params = f"{_params}&q={text}&lr=lang_zh-CN&sort=review-rating:d:s&searchType=image"
page_count = int(self.search_count_from_google / 10)
for i in range(0, page_count):
start = i * 10 + 1
system_params = f"{system_params}&start={start}"
search_result = json.loads(
requests.get(url=f"{base_url}?{system_params}").text)
if search_result and "items" in search_result:
for item in search_result["items"]:
content.append(item["link"])
images = download_images(content)
return images
    # 4. Caption the images returned by the web search, compute the cosine similarity against the original caption/text, and return the top N most similar
def compare_google_and_orignal_blipinfo(self, google_result: list,
original_text: str):
blip_google_results = []
blip_model = Blip_Model()
for item in google_result:
blip_info = blip_model.generate_text_from_image(image_url=item)
blip_google_results.append({
"blip_info": blip_info,
"image_path": item,
"distance": 0
})
helper = EmbeddingHelper(collection_name=self.collection_name)
orignal_tensor = helper.embeddingModel.embed_query(original_text)
for i in range(0, len(blip_google_results)):
google_tensor = helper.embeddingModel.embed_query(
blip_google_results[i]["blip_info"])
consine_distance = 1 - distance.cosine(google_tensor,
orignal_tensor)
blip_google_results[i]["distance"] = consine_distance
num = int(self.search_count_from_google * self.return_top_n)
result = sorted(blip_google_results,
key=lambda x: x["distance"],
reverse=True)[:num]
return result
    # 5. Store the image info in the vector database and return the images that were stored successfully
def embedding_image_info(self, images: list):
if images is None or len(images) <= 0:
raise ValueError("images参数必须包含有效值")
helper = EmbeddingHelper(collection_name=self.collection_name)
result_images = []
for image in images:
if "image_path" not in image or "blip_info" not in image:
continue
item = {}
item["source"] = "images"
item["image_path"] = image["image_path"]
helper.embedding_texts(texts=[image["blip_info"]],
metadatas=[item])
result_images.append(image["image_path"])
return result_images
@staticmethod
def resize_image(image_path: str):
        # Check that the image actually exists
        is_exist = os.path.exists(image_path) and os.path.isfile(image_path)
        if is_exist is False:
            raise ValueError("图片地址错误,请检查图片是否存在")
        # Output path (the image is overwritten in place)
        output_image_path = image_path
        # Open the original image
        original_image = Image.open(image_path)
        # Get the original image size
        width, height = original_image.size
        # Shrink the image step by step (keeping the aspect ratio)
        if width * height > 1024 * 1024:
            size_is_approve: bool = False
            resize_step = 0.1
            while size_is_approve is False:
                width = int(width * (1 - resize_step))
                height = int(height * (1 - resize_step))
                size_is_approve = width * height < 1024 * 1024
                resize_step = resize_step + 0.1
        # Resize the image
        resized_image = original_image.resize((width, height))
        # Save the resized image
        resized_image.save(output_image_path)
        # Close the image handle
        original_image.close()
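# --- Editor's note: hedged sketch, not in the original file. ---
# It ties the numbered steps above together the same way web_image_search.py drives
# this class: caption the query image, try the local vector store first, fall back to
# a Google image search, keep the closest captions, and index the survivors. The
# image path argument is an illustrative assumption.
def text_search_pipeline(image_path: str):
    util = Image_Search_Util_Txt()
    caption = util.blip_image(image_path)[0]            # step 1: caption the image
    hits = util.search_embedding_by_text(caption)       # step 2: query the vector DB
    if hits:
        return hits
    candidates = util.search_image_by_google(caption)   # step 3: web fallback
    ranked = util.compare_google_and_orignal_blipinfo(candidates, caption)  # step 4: rank by similarity
    return util.embedding_image_info(ranked)            # step 5: index and return the kept images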
# Path: utils/image_search/image_search_util_img.py
class Image_Search_Util_Img:
search_count_from_google: int = 10
search_count_from_embedding_db: int = 4
max_count_from_google: int = 40
collection_name = "images_source"
destination: str = "D:\\EasyGC\\images_download\\images"
cnn_model = None
db_collection: chromadb.Collection = None
cos_distance_threshold = 0.7
page_index: int = 1
blip_info: str = None
def __init__(self, **kwargs) -> None:
if "search_count_from_google" in kwargs:
self.search_count_from_google = int(
kwargs["search_count_from_google"])
if "search_count_from_embedding_db" in kwargs:
self.search_count_from_embedding_db = int(
kwargs["search_count_from_embedding_db"])
if "cos_distance_threshold" in kwargs:
self.cos_distance_threshold = float(
kwargs["cos_distance_threshold"])
if "page_index" in kwargs:
self.page_index = float(kwargs["page_index"])
if "max_count_from_google" in kwargs:
self.max_count_from_google = float(kwargs["max_count_from_google"])
        # Initialize the convolutional feature-extraction model
        cnn_inner_model = VGG16(include_top=False)
        self.cnn_model = Model(
            inputs=cnn_inner_model.input,
            outputs=cnn_inner_model.get_layer('block5_conv2').output)
        # Initialize the vector database
db = chromadb.Client(
settings=Settings(persist_directory=BASE_CONFIG["chromadb_path"],
is_persistent=True))
self.db_collection = db.get_or_create_collection(
name=self.collection_name, metadata={"hnsw:space": "cosine"})
    # Extract a text caption of the image
def _blip_image(self, image_url: Union[list, str], prefix: str = None):
if not image_url:
raise ValueError("未能获取到对应图片地址,此参数不能为空")
blip_model = Blip_Model()
result: list = []
if isinstance(image_url, list):
for item in image_url:
blip_str = blip_model.generate_text_from_image(image_url=item,
text=prefix)
result.append(blip_str)
else:
blip_str = blip_model.generate_text_from_image(image_url=image_url,
text=prefix)
# blip_str = Translation_Baidu.excute_translation(query=blip_str,from_lang="en", to_lang="zh")
result.append(blip_str)
return result
    # Convert an image into a feature vector
    def _embedding_image(self, image_path: str):
        img = cv2.imread(image_path)
        # Resize the image to the VGG16 input size
        img = cv2.resize(img, (224, 224))
        # Turn the image into a 4D tensor (samples, rows, cols, channels)
        img = np.expand_dims(img, axis=0)
        # Preprocess the image to match the VGG16 input requirements
        img = preprocess_input(img)
        # Extract the image features
        features = self.cnn_model.predict(img)
        # Flatten the features into a 1D vector
        vector = features.flatten()
vector = np.array(vector).tolist()
return vector
    # 1. Search the vector database
    def query_image_by_vector(self, image_path: str):
        vector = self._embedding_image(image_path=image_path)
        # Query the vector database
query_result = self.db_collection.query(
query_embeddings=vector,
n_results=self.search_count_from_embedding_db)
result = []
blips: list = None
if len(query_result["metadatas"][0]) > 0 and len(
query_result["distances"][0]) > 0:
for i in range(0, len(query_result["distances"][0])):
if query_result["distances"][0][
i] <= self.cos_distance_threshold:
result.append(query_result["metadatas"][0][i])
if len(result) <= 0:
blips = self._blip_image(image_url=image_path)
self.blip_info = blips[0]
return {
"original_vector": vector,
"search_result": result,
"original_blip": blips[0] if blips is not None else None
}
    # 2. Search the web using the image caption
def search_image_by_google(self, text: str) -> list[dict]:
def download_images(image_urls: list):
images = []
for url in image_urls:
try:
image_name = str(uuid.uuid4())
local_path = f"{self.destination}\\{image_name}.jpg"
response = requests.get(url, stream=True)
response.raise_for_status()
with open(local_path, 'wb') as file:
for chunk in response.iter_content(chunk_size=8192):
file.write(chunk)
images.append(local_path)
except requests.exceptions.RequestException as e:
continue
return images
base_url = "https://www.googleapis.com/customsearch/v1"
_params = f"key={GOOGLE_APIKEY}&cx={GOOGLE_SEARCH_ID}"
content = []
start = (self.page_index - 1) * self.search_count_from_google
system_params = f"{_params}&q={text}&lr=lang_zh-CN&sort=review-rating:d:s&searchType=image&start={start}"
search_result = json.loads(
requests.get(url=f"{base_url}?{system_params}").text)
if search_result and "items" in search_result:
for item in search_result["items"]:
content.append(item["link"])
images = download_images(content)
return images
    # 3. Embed the images returned by the web search, compute the cosine distance to the original input vector, and return the top N most similar
def compare_google_and_orignal_image(self, google_result: list,
original_vector):
def get_distance(google_result: list, original_vector):
blip_google_results = []
for item in google_result:
try:
target_vector = self._embedding_image(item)
blip_google_results.append({
"image_path": item,
"vector": target_vector,
"distance": 0
})
except:
pass
result = []
for i in range(0, len(blip_google_results)):
try:
consine_distance = distance.cosine(
np.array(original_vector),
np.array(blip_google_results[i]["vector"]))
blip_google_results[i]["distance"] = consine_distance
if consine_distance <= self.cos_distance_threshold:
result.append(blip_google_results[i])
except:
pass
return result, blip_google_results
satisfy_list, all_search_list = get_distance(google_result,
original_vector)
total_search_times = int(self.max_count_from_google /
self.search_count_from_google)
cur_time_index = 1
while len(
satisfy_list
) < self.search_count_from_embedding_db and cur_time_index <= total_search_times:
self.page_index = self.page_index + 1
google_images = self.search_image_by_google(self.blip_info)
images, all_list = get_distance(google_images, original_vector)
satisfy_list.extend(images)
all_search_list.extend(all_list)
cur_time_index = cur_time_index + 1
if len(satisfy_list) < self.search_count_from_embedding_db:
res_count = self.search_count_from_embedding_db - len(satisfy_list)
res_list = sorted(all_search_list,
key=lambda x: float(x["distance"]),
reverse=False)[:res_count]
satisfy_list.extend(res_list)
        # Delete the downloaded files that were not kept
Image_Search_Util_Img.delete_files(satisfy_list, all_search_list)
return satisfy_list
    # 4. Store the image info in the vector database and return the images that were stored successfully
def embedding_image_info(self, images: list):
if images is None or len(images) <= 0:
raise ValueError("images参数必须包含有效值")
result_images = []
for image in images:
if "image_path" not in image or "vector" not in image:
continue
id = str(uuid.uuid4())
self.db_collection.add(
ids=id,
embeddings=image["vector"],
metadatas={"image_path": image["image_path"]})
result_images.append(image["image_path"])
return result_images
def set_image_init_data(self, dic_path: str):
files = os.listdir(dic_path)
image_list = []
for file in files:
            # Build the full path of the file
file_path = os.path.join(dic_path, file)
vector = self._embedding_image(file_path)
image_list.append({"image_path": file_path, "vector": vector})
return self.embedding_image_info(image_list)
@staticmethod
def delete_files(satisfy_list, all_search_list):
satisfy_image_paths = set(item['image_path'] for item in satisfy_list)
for item in all_search_list:
image_path = item['image_path']
if image_path not in satisfy_image_paths:
try:
os.remove(image_path)
except OSError as e:
pass
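# --- Editor's note: hedged sketch, not in the original file. ---
# It strings the image-based steps above together, mirroring how web_image_search.py
# drives this class: vector search first, then a captioned Google search, then cosine
# filtering and re-indexing. The image path argument is an illustrative assumption.
def image_search_pipeline(image_path: str):
    util = Image_Search_Util_Img()
    query = util.query_image_by_vector(image_path)               # step 1: local vector search
    if query["original_blip"] is None:                           # close matches already cached
        return [item["image_path"] for item in query["search_result"]]
    candidates = util.search_image_by_google(query["original_blip"])              # step 2: web search
    kept = util.compare_google_and_orignal_image(candidates, query["original_vector"])  # step 3: filter
    return util.embedding_image_info(kept)                       # step 4: index and return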
# Path: web_image_search.py
import gradio as gr
import os
import shutil
from PIL import Image
from sdxl.generate_style_config import style_list
from sdxl.sd_refiner_model import SD_Refiner_Model
from utils.image_search.image_search_util_txt import Image_Search_Util_Txt
from utils.image_search.image_search_util_img import Image_Search_Util_Img
style_name = [(i["name"]) for i in style_list]
with gr.Blocks() as web_gc:
selected_image: str = None
gr.HTML("""<h1 align="center">EasyGC_Image_Search_Generation</h1>""")
with gr.Row():
        # Left column: search
with gr.Column(scale=6):
with gr.Row():
gallery_search = gr.Gallery(label="图像搜索结果",
show_label=False,
elem_id="gallery_search")
with gr.Row():
with gr.Column():
comment = gr.Textbox(lines=2,
show_label=False,
interactive=True)
image_dic = gr.Textbox(
lines=2,
label="图片源",
placeholder="请填写图片文件夹绝对路径,对图片进行向量化入库",
show_label=True,
interactive=True)
with gr.Column():
img = gr.Image(show_label=False,
height=200,
interactive=True,
type="filepath")
search_note_Btn = gr.Button("搜索", variant="primary")
init_image_Btn = gr.Button("向量化", variant="primary")
        # Right column: generation
with gr.Column(scale=6):
gallery_generate = gr.Gallery(label="图像生成结果",
show_label=False,
elem_id="gallery_generate")
img_refiner = gr.Image(show_label=False,
height=120,
interactive=True,
type="filepath")
style_dropdown = gr.Dropdown(choices=style_name,
type="value",
value="",
show_label=True,
container=False,
multiselect=False,
interactive=True)
ok_note_Btn = gr.Button("生成", variant="primary")
def search_images(img, comment):
if img:
image_util = Image_Search_Util_Img()
query_result = image_util.query_image_by_vector(image_path=img)
            # Relevant images were found in the vector database
if query_result["original_blip"] is None:
embedding_images = [
item["image_path"]
for item in query_result["search_result"]
]
else:
text = query_result["original_blip"]
original_vector = query_result["original_vector"]
google_images = image_util.search_image_by_google(text)
compare_result = image_util.compare_google_and_orignal_image(
google_result=google_images,
original_vector=original_vector)
embedding_images = image_util.embedding_image_info(
images=compare_result)
elif comment:
image_util = Image_Search_Util_Txt()
text = comment
embedding_images = image_util.search_embedding_by_text(text)
if embedding_images is None or len(embedding_images) <= 0:
google_images = image_util.search_image_by_google(text)
compare_result = image_util.compare_google_and_orignal_blipinfo(
google_result=google_images, original_text=text)
embedding_images = image_util.embedding_image_info(
images=compare_result)
else:
raise ValueError("图片与文本至少需要保证有一个值不为空")
return embedding_images
def generate_image_by_finner(img_refiner, style_dropdown):
Image_Search_Util_Txt.resize_image(image_path=img_refiner)
if img_refiner is not None:
style = {}
for i in style_list:
if i["name"] == style_dropdown:
style = i
break
sd_model = SD_Refiner_Model().instance(is_combine_base=False)
sd_model.load_model()
prompt = style["prompt"].format(prompt="").lower()
negative_prompt = style["negative_prompt"]
images = sd_model.get_image_to_image_single_prompt(
query=prompt,
image_url=img_refiner,
image_count=4,
negative_prompt=negative_prompt)
            # Unload the SDXL model to free GPU memory
sd_model.unload_model()
return images
return None
def init_image_db(image_dic):
image_util = Image_Search_Util_Img()
images = image_util.set_image_init_data(image_dic)
return images
search_note_Btn.click(search_images,
inputs=[img, comment],
outputs=[gallery_search],
show_progress=True)
ok_note_Btn.click(generate_image_by_finner,
inputs=[img_refiner, style_dropdown],
| outputs=[gallery_generate]) |
====REPOSITORY====
# Repo Name: kiharalab/Distance-AF
# Path: protein_utils/residue_constants.py
def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]],
def make_bond_key(atom1_name, atom2_name):
def sequence_to_onehot(
sequence: str,
mapping: Mapping[str, int],
map_unknown_to_x: bool = False) -> np.ndarray:
def _make_standard_atom_mask() -> np.ndarray:
def chi_angle_atom(atom_index: int) -> np.ndarray:
def _make_rigid_transformation_4x4(ex, ey, translation):
def _make_rigid_group_constants():
def make_atom14_dists_bounds(overlap_tolerance=1.5,
bond_length_tolerance_factor=15):
HHBLITS_AA_TO_ID = {
'A': 0,
'B': 2,
'C': 1,
'D': 2,
'E': 3,
'F': 4,
'G': 5,
'H': 6,
'I': 7,
'J': 20,
'K': 8,
'L': 9,
'M': 10,
'N': 11,
'O': 20,
'P': 12,
'Q': 13,
'R': 14,
'S': 15,
'T': 16,
'U': 1,
'V': 17,
'W': 18,
'X': 20,
'Y': 19,
'Z': 3,
'-': 21,
}
ID_TO_HHBLITS_AA = {
0: 'A',
1: 'C', # Also U.
2: 'D', # Also B.
3: 'E', # Also Z.
4: 'F',
5: 'G',
6: 'H',
7: 'I',
8: 'K',
9: 'L',
10: 'M',
11: 'N',
12: 'P',
13: 'Q',
14: 'R',
15: 'S',
16: 'T',
17: 'V',
18: 'W',
19: 'Y',
20: 'X', # Includes J and O.
21: '-',
}
MAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(
restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])
for i in range(len(restypes_with_x_and_gap)))
STANDARD_ATOM_MASK = _make_standard_atom_mask()
# Path: protein_utils/affine_utils.py
class T:
def __init__(self, rots, trans):
self.rots = rots
self.trans = trans
if self.rots is None and self.trans is None:
raise ValueError("Only one of rots and trans can be None")
elif self.rots is None:
self.rots = T.identity_rot(
self.trans.shape[:-1],
self.trans.dtype,
self.trans.device,
self.trans.requires_grad,
)
elif self.trans is None:
self.trans = T.identity_trans(
self.rots.shape[:-2],
self.rots.dtype,
self.rots.device,
self.rots.requires_grad,
)
if (
self.rots.shape[-2:] != (3, 3)
or self.trans.shape[-1] != 3
or self.rots.shape[:-2] != self.trans.shape[:-1]
):
raise ValueError("Incorrectly shaped input")
def __getitem__(self, index):
if type(index) != tuple:
index = (index,)
return T(
self.rots[index + (slice(None), slice(None))],
self.trans[index + (slice(None),)],
)
def __eq__(self, obj):
return torch.all(self.rots == obj.rots) and torch.all(
self.trans == obj.trans
)
def __mul__(self, right):
rots = self.rots * right[..., None, None]
trans = self.trans * right[..., None]
return T(rots, trans)
def __rmul__(self, left):
return self.__mul__(left)
@property
def shape(self):
s = self.rots.shape[:-2]
return s if len(s) > 0 else torch.Size([1])
def get_trans(self):
return self.trans
def get_rots(self):
return self.rots
def compose(self, t):
rot_1, trn_1 = self.rots, self.trans
rot_2, trn_2 = t.rots, t.trans
rot = rot_matmul(rot_1, rot_2)
trn = rot_vec_mul(rot_1, trn_2) + trn_1
return T(rot, trn)
def apply(self, pts):
r, t = self.rots, self.trans
rotated = rot_vec_mul(r, pts)
return rotated + t
def invert_apply(self, pts):
r, t = self.rots, self.trans
pts = pts - t
return rot_vec_mul(r.transpose(-1, -2), pts)
def invert(self):
rot_inv = self.rots.transpose(-1, -2)
trn_inv = rot_vec_mul(rot_inv, self.trans)
return T(rot_inv, -1 * trn_inv)
def unsqueeze(self, dim):
if dim >= len(self.shape):
raise ValueError("Invalid dimension")
rots = self.rots.unsqueeze(dim if dim >= 0 else dim - 2)
trans = self.trans.unsqueeze(dim if dim >= 0 else dim - 1)
return T(rots, trans)
@staticmethod
def identity_rot(shape, dtype, device, requires_grad):
rots = torch.eye(
3, dtype=dtype, device=device, requires_grad=requires_grad
)
rots = rots.view(*((1,) * len(shape)), 3, 3)
rots = rots.expand(*shape, -1, -1)
return rots
@staticmethod
def identity_trans(shape, dtype, device, requires_grad):
trans = torch.zeros(
(*shape, 3), dtype=dtype, device=device, requires_grad=requires_grad
)
return trans
@staticmethod
def identity(shape, dtype, device, requires_grad=True):
return T(
T.identity_rot(shape, dtype, device, requires_grad),
T.identity_trans(shape, dtype, device, requires_grad),
)
@staticmethod
def from_4x4(t):
rots = t[..., :3, :3]
trans = t[..., :3, 3]
return T(rots, trans)
def to_4x4(self):
tensor = self.rots.new_zeros((*self.shape, 4, 4))
tensor[..., :3, :3] = self.rots
tensor[..., :3, 3] = self.trans
tensor[..., 3, 3] = 1
return tensor
@staticmethod
def from_tensor(t):
return T.from_4x4(t)
@staticmethod
def from_3_points(p_neg_x_axis, origin, p_xy_plane, eps=1e-8):
p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)
origin = torch.unbind(origin, dim=-1)
p_xy_plane = torch.unbind(p_xy_plane, dim=-1)
e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]
e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]
denom = torch.sqrt(sum((c * c for c in e0)) + eps)
e0 = [c / denom for c in e0]
dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))
e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]
denom = torch.sqrt(sum((c * c for c in e1)) + eps)
e1 = [c / denom for c in e1]
e2 = [
e0[1] * e1[2] - e0[2] * e1[1],
e0[2] * e1[0] - e0[0] * e1[2],
e0[0] * e1[1] - e0[1] * e1[0],
]
rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)
rots = rots.reshape(rots.shape[:-1] + (3, 3))
return T(rots, torch.stack(origin, dim=-1))
@staticmethod
def concat(ts, dim):
rots = torch.cat([t.rots for t in ts], dim=dim if dim >= 0 else dim - 2)
trans = torch.cat(
[t.trans for t in ts], dim=dim if dim >= 0 else dim - 1
)
return T(rots, trans)
def map_tensor_fn(self, fn):
"""
Apply a function that takes a tensor as its only argument to the
rotations and translations, treating the final two/one
dimension(s), respectively, as batch dimensions.
E.g.: Given t, an instance of T of shape [N, M], this function can
be used to sum out the second dimension thereof as follows:
t = t.map_tensor_fn(lambda x: torch.sum(x, dim=-1))
The resulting object has rotations of shape [N, 3, 3] and
translations of shape [N, 3]
"""
rots = self.rots.view(*self.rots.shape[:-2], 9)
rots = torch.stack(list(map(fn, torch.unbind(rots, -1))), dim=-1)
rots = rots.view(*rots.shape[:-1], 3, 3)
trans = torch.stack(list(map(fn, torch.unbind(self.trans, -1))), dim=-1)
return T(rots, trans)
def stop_rot_gradient(self):
return T(self.rots.detach(), self.trans)
def scale_translation(self, factor):
return T(self.rots, self.trans * factor)
@staticmethod
def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):
translation = -1 * c_xyz
n_xyz = n_xyz + translation
c_xyz = c_xyz + translation
c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]
norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)
sin_c1 = -c_y / norm
cos_c1 = c_x / norm
zeros = sin_c1.new_zeros(sin_c1.shape)
ones = sin_c1.new_ones(sin_c1.shape)
c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))
c1_rots[..., 0, 0] = cos_c1
c1_rots[..., 0, 1] = -1 * sin_c1
c1_rots[..., 1, 0] = sin_c1
c1_rots[..., 1, 1] = cos_c1
c1_rots[..., 2, 2] = 1
norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)
sin_c2 = c_z / norm
cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm
c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
c2_rots[..., 0, 0] = cos_c2
c2_rots[..., 0, 2] = sin_c2
c2_rots[..., 1, 1] = 1
        c2_rots[..., 2, 0] = -1 * sin_c2  # fixed: these entries belong to the second (y-axis) rotation, c2_rots
        c2_rots[..., 2, 2] = cos_c2
c_rots = rot_matmul(c2_rots, c1_rots)
n_xyz = rot_vec_mul(c_rots, n_xyz)
_, n_y, n_z = [n_xyz[..., i] for i in range(3)]
norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)
sin_n = -n_z / norm
cos_n = n_y / norm
n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
n_rots[..., 0, 0] = 1
n_rots[..., 1, 1] = cos_n
n_rots[..., 1, 2] = -1 * sin_n
n_rots[..., 2, 1] = sin_n
n_rots[..., 2, 2] = cos_n
rots = rot_matmul(n_rots, c_rots)
rots = rots.transpose(-1, -2)
translation = -1 * translation
return T(rots, translation)
def cuda(self):
return T(self.rots.cuda(), self.trans.cuda())
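# --- Editor's note: hedged sketch, not part of the original file. ---
# It demonstrates the core T operations used by the loss code further below: building
# a frame from three points, mapping points into and out of that frame, and composing
# a frame with its inverse. The coordinates are illustrative.
if __name__ == "__main__":
    n = torch.tensor([[-0.5, 1.0, 0.0]])
    ca = torch.tensor([[0.0, 0.0, 0.0]])
    c = torch.tensor([[1.5, 0.0, 0.0]])
    frame = T.from_3_points(n, ca, c)          # backbone-style local frame
    pts = torch.tensor([[1.0, 2.0, 3.0]])
    local = frame.invert_apply(pts)            # express pts in the local frame
    back = frame.apply(local)                  # round-trips to the original pts
    assert torch.allclose(back, pts, atol=1e-5)
    identity_like = frame.compose(frame.invert())  # composing with the inverse ~ identity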
# Path: train_utils/tensor_utils.py
def permute_final_dims(tensor: torch.Tensor, inds: List[int]):
def flatten_final_dims(t: torch.Tensor, no_dims: int):
def masked_mean(mask, value, dim, eps=1e-4):
def pts_to_distogram(pts, min_bin=2.3125, max_bin=21.6875, no_bins=64):
def dict_multimap(fn, dicts):
def one_hot(x, v_bins):
def batched_gather(data, inds, dim=0, no_batch_dims=0):
def dict_map(fn, dic, leaf_type):
def tree_map(fn, tree, leaf_type):
def chunk_layer(
layer: Callable,
inputs: Dict[str, Any],
chunk_size: int,
no_batch_dims: int,
) -> Any:
def fetch_dims(tree):
def prep_inputs(t):
def assign(d1, d2):
# Path: Loss/openfold_loss.py
import numpy as np
import torch
import torch.nn as nn
import logging
from typing import Dict, Optional, Tuple
from protein_utils import residue_constants
from protein_utils.affine_utils import T
from train_utils.tensor_utils import (
tree_map,
tensor_tree_map,
masked_mean,
permute_final_dims,
batched_gather,
)
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#from functools import partial
#from torch.distributions.bernoulli import Bernoulli
#from openfold.utils import feats
def softmax_cross_entropy(logits, labels):
loss = -1 * torch.sum(
labels * torch.nn.functional.log_softmax(logits, dim=-1),
dim=-1,
)
return loss
def sigmoid_cross_entropy(logits, labels):
log_p = torch.nn.functional.logsigmoid(logits)
log_not_p = torch.nn.functional.logsigmoid(-logits)
loss = -labels * log_p - (1 - labels) * log_not_p
return loss
def torsion_angle_loss(
a, # [*, N, 7, 2]
a_gt, # [*, N, 7, 2]
a_alt_gt, # [*, N, 7, 2]
):
# [*, N, 7]
norm = torch.norm(a, dim=-1)
# [*, N, 7, 2]
a = a / norm.unsqueeze(-1)
# [*, N, 7]
diff_norm_gt = torch.norm(a - a_gt, dim=-1)
diff_norm_alt_gt = torch.norm(a - a_alt_gt, dim=-1)
min_diff = torch.minimum(diff_norm_gt ** 2, diff_norm_alt_gt ** 2)
# [*]
l_torsion = torch.mean(min_diff, dim=(-1, -2))
l_angle_norm = torch.mean(torch.abs(norm - 1), dim=(-1, -2))
an_weight = 0.02
return l_torsion + an_weight * l_angle_norm
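# --- Editor's note: hedged sketch, not part of the original file. ---
# A quick sanity check of torsion_angle_loss: predictions identical to the ground
# truth (and already unit-norm) should give a loss of ~0; the alternative ground
# truth argument exists to handle 180-degree-ambiguous side-chain angles.
if __name__ == "__main__":
    angles = torch.randn(1, 10, 7, 2)
    angles = angles / torch.norm(angles, dim=-1, keepdim=True)   # unit (cos, sin) pairs
    zero_loss = torsion_angle_loss(angles, angles, angles)
    assert torch.allclose(zero_loss, torch.zeros_like(zero_loss), atol=1e-5)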
def compute_fape(
pred_frames: T,
target_frames: T,
frames_mask: torch.Tensor,
pred_positions: torch.Tensor,
target_positions: torch.Tensor,
positions_mask: torch.Tensor,
| length_scale: float, |
====REPOSITORY====
# Repo Name: HangBack/nodoc
# Path: structure/tree.py
class Node(metaclass=abc.ABCMeta):
def __init__(self, **data) -> None:
"""
        Node
        - Keyword arguments:
            - **data: Any, data stored on the node
        - Attributes:
            - parent: Node, the parent of this node
            - left: Node, the left sibling of this node
            - right: Node, the right sibling of this node
            - children: list[Node], all children of this node
            - visited: bool, whether this node has been visited
"""
        self._parent: Node = None  # parent node
        self._left: Node = None  # left sibling
        self._right: Node = None  # right sibling
        self._children: Nodes = Nodes([])  # list of child nodes
        self._data = data  # node data
        self._visited: bool = False  # whether the node has been visited
        self._count: int = 1  # number of nodes including this one (per depth)
        self._depth: int = 0  # depth of the node
self._tree: Tree = None
self.seeker: Node
@property
def tree(self) -> 'Tree':
return self._tree
@tree.setter
def tree(self, tree: 'Tree'):
if not isinstance(tree, Tree):
raise TypeError(f'期望:{Tree},实际:{type(tree)}')
self._tree = tree
@property
def depth(self) -> int:
return self._depth
@depth.setter
def depth(self, value: 'int') -> int:
if not isinstance(value, int):
raise TypeError(f'期望:{int},实际:{type(value)}')
self._depth = value
@property
def visited(self) -> bool:
"是否被访问"
return self._visited
@visited.setter
def visited(self, value: bool) -> bool:
self._visited = value
@property
def parent(self):
"""
        Parent node
        - setter:
            - node: `Node`, set the parent node of this node
"""
return self._parent
@parent.setter
def parent(self, node: 'Node'):
if not isinstance(node, Node):
raise TypeError(f'期望:{Node},实际:{type(node)}')
if self._parent:
self._parent._children.remove(self)
self._parent = node
self._parent._children.append(self)
        self.depth = self._parent.depth + 1  # node depth increases by 1
auto_tree_updater(self, node)
@property
def left(self) -> 'Node':
"""
        Sibling node - left
        - node: `Node`, set the left sibling of this node
"""
return self._left
@left.setter
def left(self, node: 'Node'):
if not isinstance(node, Node):
raise TypeError(f'期望:{Node},实际:{type(node)}')
self._left = node
        if self._left.right is not self:  # also prevents runaway recursion
self._left.right = self
auto_tree_updater(self, node)
@property
def right(self) -> 'Node':
"""
        Sibling node - right
        - node: `Node`, set the right sibling of this node
"""
return self._right
@right.setter
def right(self, node: 'Node'):
if not isinstance(node, Node):
raise TypeError(f'期望:{Node},实际:{type(node)}')
self._right = node
        if self._right.left is not self:  # also prevents runaway recursion
self._right.left = self
auto_tree_updater(self, node)
@property
def children(self) -> 'Nodes':
"子节点"
return self._children
@children.setter
def children(self, value):
self._children = value
@property
def data(self):
"节点数据"
return self._data
@data.setter
def data(self, value: Any):
self._data = value
@property
def count(self) -> int:
"节点数量"
return self._count
@count.setter
def count(self, value: int):
self._count = value
@property
def type(self):
"节点类型"
return type(self._data)
@abc.abstractmethod
def get_route(self, endpoint: 'Node', condition: Callable[['Node'], bool] = lambda node: True) -> 'Nodes':
"""
        Get the child-parent route -> all nodes between this node and the target node (excluding this node).
        - endpoint: Node, the target node of this node.
        - condition: Callable, predicate applied to each node on the route.
"""
if not isinstance(endpoint, Node):
raise TypeError(f'期望:{Node},实际:{type(endpoint)}')
elif self is endpoint:
return None
result: list['Node'] = []
if self.depth < endpoint.depth:
current_node = endpoint
endpoint = self
reverse = False
else:
current_node = self
reverse = True
while (
current_node.parent is not None and
current_node is not endpoint
):
current_node = current_node.parent
            if condition(current_node):  # append the node if it satisfies the condition
result.append(current_node)
if reverse:
result.reverse()
if len(result) == 0:
return None
return Nodes(result)
def __lshift__(self, other):
if isinstance(other, Node):
return other.get_route(self)
def __rshift__(self, other):
if isinstance(other, Node):
return self.get_route(other)
def __matmul__(self, other):
if isinstance(other, Node):
self.parent = other
return other
elif isinstance(other, Tree):
self._tree = other
return other
def __and__(self, other):
if isinstance(other, Node):
self.right = other
return Nodes([self, other])
elif isinstance(other, Nodes):
self & other[0]
other.prepend(self)
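# --- Editor's note: hedged sketch, not part of the original file. ---
# Node is abstract, so a minimal concrete subclass is defined here just to show the
# operator overloads: `@` attaches a child to a parent, `&` links a right sibling and
# `>>` returns the route between two nodes. It assumes the module-level
# auto_tree_updater helper (referenced above but not shown in this excerpt) tolerates
# nodes that are not yet attached to a Tree.
class _DemoNode(Node):
    def get_route(self, endpoint, condition=lambda node: True):
        return super().get_route(endpoint, condition)

def _node_operator_demo():
    root = _DemoNode(name='root')
    child = _DemoNode(name='child')
    leaf = _DemoNode(name='leaf')
    child @ root                        # child.parent = root
    leaf @ child                        # leaf.parent = child
    child & _DemoNode(name='sibling')   # link a right sibling of child
    return leaf >> root                 # Nodes route from leaf up to root: [root, child]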
# Path: structure/tree.py
class Nodes(metaclass=abc.ABCMeta):
def __init__(self, nodes: list[Node]) -> None:
self.data: deque[Node] = deque(nodes)
def __matmul__(self, other):
for node in self.data:
node @ other
return other
def __and__(self, other):
if isinstance(other, Node):
self[-1] & other
self.append(other)
return other
def __getitem__(self, index) -> Node:
return self.data[index]
def __iter__(self):
for node in self.data:
yield node
def prepend(self, node: Node):
self.data.appendleft(node)
def append(self, node: Node):
self.data.append(node)
def remove(self, node: Node):
self.data.remove(node)
def __str__(self) -> str:
return "".join([str(node) for node in self.data])
# Path: structure/tree.py
class Tree(metaclass=abc.ABCMeta):
def __init__(self, root: Node, name: str = '无名树') -> None:
"""
        Instantiate a basic tree object.
        - root: Node, the root node of the tree.
        - name: str, the name of the tree, used for lookups.
"""
self.name: str = name
self.root: Node = root
self.current_node: Node = root
self.__nodecount: int = 1
for node in self.DFT():
node @ self
@property
def nodecount(self) -> int:
return self.__nodecount
"""
    Queue-based level traversal
    queue = [time1 -> a, time2 -> b...]
    Loop until the queue is empty:
    Dequeue
    queue.popleft()  # now queue looks like [time2 -> b, time3 -> c...]
    Enqueue
    queue.extend(nodeList)  # now queue looks like [time2 -> b, time3 -> c...timeN1 -> Na, timeN2 -> Nb...]
"""
    def BFS(self, callback: Callable[[Node], bool] = lambda node: True) -> Node | None:
        """
        Breadth-first search (level-order search).
        - callback: Callable, used to decide whether a node satisfies the given condition.
"""
visited = set()
queue = deque([self.root])
while queue:
current_node = queue.popleft()
if callback(current_node):
                return current_node  # the first node that satisfies the condition
visited.add(current_node)
queue.extend(child
for child in current_node.children
if child not in visited
and
child not in queue)
        return None  # no node satisfied the condition
@abc.abstractmethod
    def BFT(self, callback: Callable[[Node], bool] = lambda node: True) -> Nodes | None:
        """
        Breadth-first traversal (level-order traversal).
        - callback: Callable, used to decide whether a node satisfies the given condition.
"""
visited = set()
queue = deque([self.root])
result = []
while queue:
current_node = queue.popleft()
if callback(current_node):
                result.append(current_node)  # collect a node that satisfies the condition
visited.add(current_node)
queue.extend(child
for child in current_node.children
if child not in visited
and
child not in queue)
return Nodes(result)
"""
    Recursion
    Starting from the current node, visit its children first; each child again visits its own children first. If the bottom is reached without a match, go back to the parent and try its next child, and so on.
"""
    def DFS(self, node=None, callback: Callable[[Node], bool] = lambda node: True) -> Node | None:
        """
        Depth-first search.
        - node: Node, the starting node; defaults to the root.
        - callback: Callable, used to decide whether a node satisfies the given condition.
"""
result = None
if node is None:
node = self.root
if callback(node):
            return node  # found a node that satisfies the condition
node.visited = True
for child in node.children:
if not child.visited:
result = self.DFS(child, callback)
if result is not None:
                    return result  # found a matching node in the subtree
node.visited = False
        return None  # no matching node found in the current subtree
@abc.abstractmethod
    def DFT(self, node=None, callback: Callable[[Node], bool] = lambda node: True) -> Nodes | None:
        """
        Depth-first traversal.
        - node: Node, the starting node; defaults to the root.
        - callback: Callable, used to decide whether a node satisfies the given condition.
"""
result = None
if node is None:
node = self.root
result = []
if callback(node):
            result.append(node)  # collect a node that satisfies the condition
node.visited = True
for child in node.children:
if not child.visited:
current_node = self.DFT(child, callback)
result.extend(current_node)
node.visited = False
return Nodes(result)
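# --- Editor's note: hedged sketch, not part of the original file. ---
# Tree is abstract, so minimal concrete subclasses are defined here purely to show the
# traversal orders described in the comments above: BFT visits level by level (queue),
# DFT dives into each child before moving to the next sibling (recursion). It assumes
# the module-level auto_tree_updater helper (not shown in this excerpt) tolerates
# nodes built this way.
class _DemoTreeNode(Node):
    def get_route(self, endpoint, condition=lambda node: True):
        return super().get_route(endpoint, condition)

class _DemoTree(Tree):
    def BFT(self, callback=lambda node: True):
        return super().BFT(callback)
    def DFT(self, node=None, callback=lambda node: True):
        return super().DFT(node, callback)

def _traversal_demo():
    root = _DemoTreeNode(name='root')
    a, b, c = (_DemoTreeNode(name=n) for n in 'abc')
    a @ root
    b @ root
    c @ a
    tree = _DemoTree(root)
    level_order = [n.data['name'] for n in tree.BFT()]  # ['root', 'a', 'b', 'c']
    depth_order = [n.data['name'] for n in tree.DFT()]  # ['root', 'a', 'c', 'b']
    return level_order, depth_order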
# Path: structure/doc_tree.py
import time
import sys
from typing import Any, Callable, Self, TypedDict, Unpack, Literal
from nodoc import const
from nodoc.document.base import Document
from .tree import Node, Nodes, Tree
raise TypeError(f'期望:bool,实际:{type(value)}')
self._isImage = value
def __eq__(self, __value: object) -> bool:
if __value is splitSign:
return len(self.children) == 0
return super().__eq__(__value)
def __rshift__(self, other):
result = super().__rshift__(other)
if result is None:
return None
return docNodes(result)
def __lshift__(self, other):
result = super().__lshift__(other)
if result is None:
return None
return docNodes(result)
def get_route(self, endpoint: 'docNode', condition: Callable[['docNode'], bool] = lambda node: True) -> 'docNodes':
"""
        Get the child-parent route -> by default, all title nodes between this node and the target node (excluding this node).
        - endpoint: Node, the target node of this node.
"""
if condition is None:
def condition(node: docNode):
return node.isTitle
result = super().get_route(endpoint, condition)
if result is None:
return None
return docNodes(list(result))
def __matmul__(self, other):
result = super().__matmul__(other)
return result
def __str__(self):
return f"""
标头:{self.data['head']}
内容:{self.data['content']}
类型:{self.data['kind']}
路由:{self >> self.tree.root}
"""
class docNodes(Nodes):
def __init__(self, nodes: list[docNode]) -> None:
super().__init__(nodes)
def prepend(self, node: docNode):
return super().prepend(node)
def append(self, node: docNode):
return super().append(node)
def remove(self, node: docNode):
return super().remove(node)
def __str__(self) -> str:
return "/".join([node.data['content'] for node in self.data])
class docTree(Tree):
def __init__(self, root: docNode, name: str = '文档树', **data: Unpack[docArg]) -> None:
"""
        Instantiate a document-tree object.
        - root: docNode, the root node of the document tree.
        - name: str, the name of the document tree, used for lookups.
"""
data.setdefault('head', None)
data.setdefault('metadata', {
'create_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
'modify_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
'visit_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
})
if not isinstance(root, docNode):
raise TypeError(f'期望:docNode,实际:{type(root)}')
super().__init__(root, name)
self.data = data
self.root: docNode
def update(self):
self.data['metadata'].setdefault('size', sys.getsizeof(self.document))
def from_document(self, document: Document):
...
# @override
def DFT(self, node=None, callback: Callable[[Node], bool] = lambda node: True) -> list[docNode] | None:
result = super().DFT(node, callback)
return docNodes(result)
# @override
def BFT(self, callback: Callable[[Node], bool] = lambda node: True) -> list[docNode] | None:
result = super().BFT(callback)
return docNodes(result)
def __str__(self):
document = None
if hasattr(self, 'document'):
if len(self.document) <= 12:
document = self.document.replace('\n', '')
else:
prefix = self.document[0:5].replace('\n', '')
char_count = f'...({len(self.document) - 10}字)...'
suffix = self.document[-6:-1].replace('\n', '')
document = prefix + char_count + suffix
result = f"""
{self.name}
- 创建时间:{self.data['metadata']['create_time']}
- 修改时间:{self.data['metadata']['modify_time']}
- 访问时间:{self.data['metadata']['visit_time']}
- 文档大小:{const.get_size(self.data['metadata']['size'])}
| - 文档内容:{document} |
====REPOSITORY====
# Repo Name: kai-wen-yang/QVix
# Path: models/instruct_blip/common/dist_utils.py
def setup_for_distributed(is_master):
def print(*args, **kwargs):
def is_dist_avail_and_initialized():
def get_world_size():
def get_rank():
def is_main_process():
def init_distributed_mode(args):
def get_dist_info():
def main_process(func):
def wrapper(*args, **kwargs):
def download_cached_file(url, check_hash=True, progress=False):
def get_cached_file_path():
# Path: models/instruct_blip/common/dist_utils.py
def download_cached_file(url, check_hash=True, progress=False):
"""
Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.
If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.
"""
def get_cached_file_path():
# a hack to sync the file path across processes
parts = torch.hub.urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(timm_hub.get_cache_dir(), filename)
return cached_file
if is_main_process():
timm_hub.download_cached_file(url, check_hash, progress)
if is_dist_avail_and_initialized():
dist.barrier()
return get_cached_file_path()
# Path: models/instruct_blip/common/utils.py
def is_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
# Path: models/instruct_blip/common/logger.py
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def global_avg(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {:.4f}".format(name, meter.global_avg))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
log_msg = [
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
if torch.cuda.is_available():
log_msg.append("max mem: {memory:.0f}")
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
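# --- Editor's note: hedged usage sketch, not part of the original file. ---
# MetricLogger is typically driven from a training/eval loop: log_every wraps an
# iterable and prints throughput plus the smoothed meters updated inside the loop.
# The loss and learning-rate values here are illustrative.
if __name__ == "__main__":
    logger = MetricLogger(delimiter="  ")
    dummy_batches = range(100)
    for step in logger.log_every(dummy_batches, print_freq=20, header="Demo:"):
        logger.update(loss=1.0 / (step + 1), lr=1e-4)   # update the smoothed meters
    print("Averaged stats:", logger.global_avg())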
# Path: models/instruct_blip/models/base_model.py
class BaseModel(nn.Module):
"""Base class for models."""
def __init__(self):
super().__init__()
@property
def device(self):
return list(self.parameters())[0].device
def load_checkpoint(self, url_or_filename):
"""
Load from a finetuned checkpoint.
This should expect no mismatch in the model keys and the checkpoint keys.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint.keys():
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
@classmethod
def from_pretrained(cls, model_type):
"""
Build a pretrained model from default configuration file, specified by model_type.
Args:
- model_type (str): model type, specifying architecture and checkpoints.
Returns:
- model (nn.Module): pretrained or finetuned model, depending on the configuration.
"""
model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
model = cls.from_config(model_cfg)
return model
@classmethod
def default_config_path(cls, model_type):
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}".format(model_type)
return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])
def load_checkpoint_from_config(self, cfg, **kwargs):
"""
Load checkpoint as specified in the config file.
If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model.
When loading the pretrained model, each task-specific architecture may define their
own load_from_pretrained() method.
"""
load_finetuned = cfg.get("load_finetuned", True)
if load_finetuned:
finetune_path = cfg.get("finetuned", None)
assert (
finetune_path is not None
), "Found load_finetuned is True, but finetune_path is None."
self.load_checkpoint(url_or_filename=finetune_path)
else:
load_pretrained = cfg.get("load_pretrained", True)
if load_pretrained:
# load pre-trained weights
pretrain_path = cfg.get("pretrained", None)
assert "Found load_finetuned is False, but pretrain_path is None."
self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs)
def before_evaluation(self, **kwargs):
pass
def show_n_params(self, return_str=True):
tot = 0
for p in self.parameters():
w = 1
for x in p.shape:
w *= x
tot += w
if return_str:
if tot >= 1e6:
return "{:.1f}M".format(tot / 1e6)
else:
return "{:.1f}K".format(tot / 1e3)
else:
return tot
# Path: models/instruct_blip/models/blip2_models/Qformer.py
class BertEmbeddings(nn.Module):
class BertSelfAttention(nn.Module):
class BertSelfOutput(nn.Module):
class BertAttention(nn.Module):
class BertIntermediate(nn.Module):
class BertOutput(nn.Module):
class BertLayer(nn.Module):
class BertEncoder(nn.Module):
class BertPooler(nn.Module):
class BertPredictionHeadTransform(nn.Module):
class BertLMPredictionHead(nn.Module):
class BertOnlyMLMHead(nn.Module):
class BertPreTrainedModel(PreTrainedModel):
class BertModel(BertPreTrainedModel):
class BertLMHeadModel(BertPreTrainedModel):
class BertForMaskedLM(BertPreTrainedModel):
def __init__(self, config):
def forward(
self,
input_ids=None,
position_ids=None,
query_embeds=None,
past_key_values_length=0,
):
def __init__(self, config, is_cross_attention):
def save_attn_gradients(self, attn_gradients):
def get_attn_gradients(self):
def save_attention_map(self, attention_map):
def get_attention_map(self):
def transpose_for_scores(self, x):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
def __init__(self, config):
def forward(self, hidden_states, input_tensor):
def __init__(self, config, is_cross_attention=False):
def prune_heads(self, heads):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
def __init__(self, config):
def forward(self, hidden_states):
def __init__(self, config):
def forward(self, hidden_states, input_tensor):
def __init__(self, config, layer_num):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
query_length=0,
):
def feed_forward_chunk(self, attention_output):
def feed_forward_chunk_query(self, attention_output):
def __init__(self, config):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
query_length=0,
):
def create_custom_forward(module):
def custom_forward(*inputs):
def __init__(self, config):
def forward(self, hidden_states):
def __init__(self, config):
def forward(self, hidden_states):
def __init__(self, config):
def forward(self, hidden_states):
def __init__(self, config):
def forward(self, sequence_output):
def _init_weights(self, module):
def __init__(self, config, add_pooling_layer=False):
def get_input_embeddings(self):
def set_input_embeddings(self, value):
def _prune_heads(self, heads_to_prune):
def get_extended_attention_mask(
self,
attention_mask: Tensor,
input_shape: Tuple[int],
device: device,
is_decoder: bool,
has_query: bool = False,
) -> Tensor:
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
):
def __init__(self, config):
def get_output_embeddings(self):
def set_output_embeddings(self, new_embeddings):
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=True,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
return_logits=False,
is_decoder=True,
reduction="mean",
):
def prepare_inputs_for_generation(
self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs
):
def _reorder_cache(self, past, beam_idx):
def __init__(self, config):
def get_output_embeddings(self):
def set_output_embeddings(self, new_embeddings):
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
return_logits=False,
is_decoder=False,
):
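# Editor's illustration (hedged sketch, not from the repository): how the Q-Former
# skeleton above is typically driven in BLIP-2-style models. The .bert attribute of
# BertLMHeadModel, the learned query tokens and the fake image features are
# assumptions made for illustration; the keyword arguments match the
# BertModel.forward signature listed above.
import torch
from torch import nn

def run_qformer_sketch(qformer, hidden_size=768, num_query_tokens=32, batch_size=2):
    query_tokens = nn.Parameter(torch.zeros(1, num_query_tokens, hidden_size))
    image_embeds = torch.randn(batch_size, 257, hidden_size)  # placeholder ViT features
    image_atts = torch.ones(image_embeds.shape[:-1], dtype=torch.long)
    return qformer.bert(
        query_embeds=query_tokens.expand(batch_size, -1, -1),
        encoder_hidden_states=image_embeds,
        encoder_attention_mask=image_atts,
        return_dict=True,
    )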
# Path: models/instruct_blip/models/eva_vit.py
def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp16"):
model = VisionTransformer(
img_size=img_size,
patch_size=14,
use_mean_pooling=False,
embed_dim=1408,
depth=39,
num_heads=1408//88,
mlp_ratio=4.3637,
qkv_bias=True,
drop_path_rate=drop_path_rate,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
use_checkpoint=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth"
cached_file = download_cached_file(
url, check_hash=False, progress=True
)
state_dict = torch.load(cached_file, map_location="cpu")
interpolate_pos_embed(model,state_dict)
incompatible_keys = model.load_state_dict(state_dict, strict=False)
# print(incompatible_keys)
if precision == "fp16":
# model.to("cuda")
convert_weights_to_fp16(model)
return model
# Path: models/instruct_blip/models/clip_vit.py
def create_clip_vit_L(img_size=224,use_checkpoint=False,precision="fp16"):
model = VisionTransformer(
input_resolution=img_size,
patch_size=14,
width=1024,
layers=23,
heads=16,
use_grad_checkpointing=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/clip_vit_L.pth"
cached_file = download_cached_file(
url, check_hash=False, progress=True
)
state_dict = torch.load(cached_file, map_location="cpu")
interpolate_pos_embed(model,state_dict)
incompatible_keys = model.load_state_dict(state_dict, strict=False)
# print(incompatible_keys)
if precision == "fp16":
convert_weights_to_fp16(model)
return model
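# Editor's illustration (hedged usage sketch): both factory functions return a
# VisionTransformer whose num_features attribute is wrapped by the custom LayerNorm
# in blip2.py's init_vision_encoder below; the keyword names come from the signatures
# above, and the fp32 choice simply skips the fp16 conversion.
visual_encoder = create_eva_vit_g(img_size=224, drop_path_rate=0.0, precision="fp32")
ln_vision = LayerNorm(visual_encoder.num_features)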
# Path: models/instruct_blip/models/blip2_models/blip2.py
import contextlib
import logging
import os
import time
import datetime
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
import spacy
from ...common import dist_utils as dist_utils
from ...common.dist_utils import download_cached_file
from ...common.utils import is_url
from ...common.logger import MetricLogger
from ..base_model import BaseModel
from ..blip2_models.Qformer import BertConfig, BertLMHeadModel
from ..eva_vit import create_eva_vit_g
from ..clip_vit import create_clip_vit_L
from transformers import BertTokenizer
# elif model_name == "eva2_clip_L":
# visual_encoder = create_eva2_vit_L(
# img_size, drop_path_rate, use_grad_checkpoint, precision
# )
elif model_name == "clip_L":
visual_encoder = create_clip_vit_L(img_size, use_grad_checkpoint, precision)
ln_vision = LayerNorm(visual_encoder.num_features)
self.vit_name = model_name
return visual_encoder, ln_vision
def load_from_pretrained(self, url_or_filename):
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
for key, value in checkpoint['model'].items():
print(key)
state_dict = checkpoint["model"]
msg = self.load_state_dict(state_dict, strict=False)
# logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
def get_optimizer_params(self, weight_decay, lr_scale=1):
if self.vit_name == "eva_clip_g":
vit_num_layers = self.visual_encoder.get_num_layer()
lr_scales = list(lr_scale ** (vit_num_layers + 1 - i) for i in range(vit_num_layers + 2))
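            # Worked example: with vit_num_layers = 2 and lr_scale = 0.9 this yields
            # [0.9**3, 0.9**2, 0.9**1, 0.9**0], i.e. earlier ViT layers receive smaller
            # learning-rate multipliers while the last group keeps the base rate.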
parameter_group_names = {}
parameter_group_vars = {}
for name, param in self.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias"):
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if 'visual_encoder' in name:
layer_id = self.visual_encoder.get_num_layer(name.replace('visual_encoder.',''))
group_name = "vit_layer_%d_%s" % (layer_id, group_name)
else:
layer_id = None
if group_name not in parameter_group_names:
if layer_id is not None:
scale = lr_scales[layer_id]
else:
scale = 1
parameter_group_names[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name]["params"].append(param)
parameter_group_names[group_name]["params"].append(name)
# import json
# print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
optim_params = list(parameter_group_vars.values())
return optim_params
else:
return super().get_optimizer_params(weight_decay,lr_scale)
def _lemmatize(self, answers):
def apply(answer):
doc = self.lemmatizer(answer)
words = []
for token in doc:
if token.pos_ in ["NOUN", "VERB"]:
words.append(token.lemma_)
else:
words.append(token.text)
answer = " ".join(words)
return answer
return [apply(answer) for answer in answers]
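        # e.g. with en_core_web_sm, "two boxes jumped" becomes roughly "two box jump":
        # only tokens tagged NOUN or VERB are replaced by their lemmas.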
@property
def lemmatizer(self):
if self._lemmatizer is None:
try:
self._lemmatizer = spacy.load("en_core_web_sm")
            except (ImportError, OSError):
logging.error(
"""
Please install spacy and en_core_web_sm model to apply lemmatization.
python -m spacy download en_core_web_sm
OR
import spacy.cli
spacy.cli.download("en_core_web_sm")
"""
)
exit(1)
return self._lemmatizer
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
| class LayerNorm(nn.LayerNorm): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: inferigang/breads
# Path: src/ui/banner.py
def get_banner() -> None:
''' Return BREADS banner url '''
banner_list: list = [
"https://pastebin.com/raw/mhEASUuU",
"https://pastebin.com/raw/CFEzbcVh",
"https://pastebin.com/raw/yVe8AAud",
"https://pastebin.com/raw/iKR7qDAM",
"https://pastebin.com/raw/fSzQMQ27",
"https://pastebin.com/raw/kGqfqWRc"
]
banner = choice(banner_list)
try:
response = get(banner, verify=False)
banner = response.text
current_version = "Breaking Active Directory Security by @opps3c\nVersion: v1.1.2"
print(f"[bold {get_random_color()}]{banner}\n{current_version}\n\nType 'help' to list commands[/]")
except RequestException as error:
print(f"[red][!][/][bright_white] Error when requesting banner from pastebin: {error}[/]")
pass
# Path: src/handlers/profile.py
def create_profile_folder() -> None:
''' Create the profile folder name based on user input in /home/user/.breads/profile_name/ '''
global profile_name
profile_name_input = Prompt.ask("[yellow]# Type the profile name[/]")
profile_name = profile_name_input
print(f"\n[green][✔][/] [bright_white]Creating [b]{profile_name}'s[/] profile folder [/]")
create_breads_directory()
folder_name = f"{BREADS_FOLDER}/{profile_name}"
if not path.exists(folder_name):
mkdir(folder_name)
print(f"[green][+][/] [bright_white]{folder_name} folder created[/]")
create_profile_base_json()
else:
print(f"[red][!][/] [bright_white]{folder_name} profile already exists[/]")
return
# Path: src/handlers/profile.py
def load_profile() -> None:
''' Loads a profile based on user input if at least one exists on breads '''
if not path.exists(BREADS_FOLDER):
print(f"[red][!][/] .breads directory not found. Initialize one with 'create_profile' command\n")
return
if path.exists(BREADS_FOLDER):
folders_list = listdir(BREADS_FOLDER)
if not folders_list:
print(f"[red][!][/] No profiles found on .breads directory. Create one with 'create_profile' command\n")
return
for folder_name in folders_list:
print(f"[cyan]* {folder_name}[/]")
load_user_input = Prompt.ask("\n# Type the profile name to be used")
for folder_name in folders_list:
if(load_user_input == folder_name):
print(f"[green][+][/] [bright_white]Profile [b]{folder_name}'s[/b] selected successfully!. Load it with the 'load_profile' command[/]")
environ["breads_profile"] = folder_name
profile_json_file = f"{BREADS_FOLDER}/{get_current_profile()}/settings.json"
with open(profile_json_file, 'r+') as json_file:
existing_data = json.load(json_file)
host = existing_data['host']
username = existing_data['username']
password = existing_data['password']
                    if(len(host) > 2): # If the host value in the profile json file is longer than 2 characters, we can assume a host is already defined
print(f"[yellow][!][/] [bright_white]Profile settings: {host}, {username}, {password}[/]")
keep_data_input = Prompt.ask("[yellow][!][/] [bright_white]There is already information stored in this profile, do you want to keep it? [Y/N][/]")
keep_data_input = keep_data_input.lower()
if(keep_data_input == 'y'):
print("[yellow][!][/] [bright_white]Not changing current configuration[/]\n")
return
else:
pass
target_host_input = Prompt.ask("# Type the target host (ex: 127.0.0.1)")
username_input = Prompt.ask("# Type the username to be used (example.lab/Administrator)")
password_input = Prompt.ask("# Type the password to be used")
profile_data = {
"host": target_host_input,
"username": username_input,
"password": password_input
}
try:
existing_data.update(profile_data)
json_file.seek(0)
json.dump(existing_data, json_file, ensure_ascii=False, indent=4)
json_file.truncate()
print(f"[green][+][/] [bright_white]Profile information stored successfully![/]\n")
except Exception as error:
print(f"[red][!][/] [bright_white]Error when trying to store profile information: {error}[/]")
else:
print(f"[red][!][/] [bright_white]Profile not found, check if the name is correct[/]")
# Path: src/helpers/user.py
def get_current_profile() -> None:
''' Get current user profile name based on environment variables (breads_profile) '''
profile = environ.get("breads_profile") if environ.get("breads_profile", "None") else ""
return str(profile)
# Path: src/modules/enum/domain_controllers.py
def get_domain_controllers() -> None:
    ''' Get all the Domain Controller names '''
#search_filter = '(primaryGroupID=516)' # 516 is the Primary ID for Domain Controllers computers
search_filter = '(&(objectCategory=Computer)(userAccountControl:1.2.840.113556.1.4.803:=8192))'
query = connect_and_fetch(search_filter)
if query:
print(f"[yellow][!][/] [bright_white]Domain Controller(s) Name(s):[/]")
        for _dn, attrs in query: # _dn is only unpacked so the attrs can be read from each query result
for attr_name in attrs:
if(attr_name == 'name'):
for dc_name in attrs[attr_name]:
print(f"[green][+][/] [bright_white]Name: {dc_name.decode('utf-8')}[/]")
# Path: src/modules/enum/administrators.py
def get_admins() -> None:
''' Get all the accounts from domain that has administrator privilege in somewhere '''
search_filter = f'(&(&(objectCategory=person)(objectClass=user)(!(userAccountControl:1.2.840.113556.1.4.803:=2)))(adminCount=1))'
attributes = ['sAMAccountName']
query = connect_and_fetch(search_filter)
if query:
print(f"[yellow][!][/] [bright_white]Administrator(s) username(s):[/]")
print(f"[yellow][!][/] Users listed below are not necessarily Domain Administrators, they can be Local Administrator.")
for _dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
print(f"[green][+][/] [bright_white]{attribute}: {value}[/]")
# Path: src/modules/enum/pass_not_req.py
def get_pass_not_req() -> None:
    ''' Get users from the domain that do not require a password to authenticate '''
search_filter = f'(&(objectClass=user)(userAccountControl:1.2.840.113556.1.4.803:=32))'
attributes = 'sAMAccountName'
query = connect_and_fetch(search_filter)
if query:
print(f"[yellow][!][/] Users that doesn't require any password to logon:")
for _dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
print(f"[green][+][/] [bright_white]{attribute}: {value}[/]")
# Path: src/modules/enum/pass_pol.py
def get_pass_policy() -> None:
''' Get the current password policy from the domain '''
search_filter = f'(objectClass=domainDNS)'
attributes = ['minPwdLength', 'lockoutThreshold', 'lockoutDuration']
query = connect_and_fetch(search_filter)
if query:
print(f"[yellow][!][/] [bright_white]Domain Password Policy:[/]")
for _dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
if attribute == 'lockoutThreshold' and value == '0':
console.print(f"[green][+][/] [bright_white]{attribute}:[/] [bright_yellow]{value} - Password Spray possiblity detected[/]", highlight=False)
else:
console.print(f"[green][+][/] [bright_white]{attribute}: {value}[/]", highlight=False)
# Path: src/modules/enum/all_users.py
def get_users() -> None:
''' List all usernames from the domain'''
search_filter = f'(&(objectCategory=person)(objectClass=user))'
attributes = ['sAMAccountName']
query = connect_and_fetch(search_filter)
if query:
print(f"[yellow][!][/] Domain Users:")
for _dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
print(f"[green][+][/] [bright_white]{attribute}: {value}[/]")
# Path: src/modules/enum/disabled_accounts.py
def get_disabled_users() -> None:
''' List all the current disabled accounts from the domain '''
search_filter = f'(&(objectCategory=person)(objectClass=user)(userAccountControl:1.2.840.113556.1.4.803:=2))'
attributes = ['sAMAccountName']
query = connect_and_fetch(search_filter)
if query:
print(f"[yellow][!][/] Disabled Domain Users:")
for _dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
print(f"[green][+][/] [bright_white]{attribute}: {value}[/]")
# Path: src/modules/enum/laps.py
def get_laps(inp) -> None:
    ''' Get all the LAPS passwords from the domain '''
computer_name: str = inp
global search_filter
if len(computer_name) == 0:
search_filter = "(&(objectCategory=computer)(|(msLAPS-EncryptedPassword=*)(ms-MCS-AdmPwd=*)(msLAPS-Password=*)))"
else:
search_filter = f"(&(objectCategory=computer)(|(msLAPS-EncryptedPassword=*)(ms-MCS-AdmPwd=*)(msLAPS-Password=*))(name={computer_name}))"
attributes = ['sAMAccountName', 'msLAPS-Password', 'ms-MCS-AdmPwd', 'msLAPS-EncryptedPassword']
query = connect_and_fetch(search_filter)
if query:
print(f"[yellow][!][/] LAPS information:")
print(f"[yellow][!][/] If the result is blank, probably there is no LAPS information to retrive or your user does not have the required permissions\n")
for _dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
print(f"[green][+][/] [bright_white]{attribute}: {value}[/]")
# Path: src/modules/enum/kerberoasting.py
def get_kerberoastable() -> None:
    ''' Search for kerberoastable users with a filter that ignores krbtgt and computers '''
# TO-DO: Return kerberoastable user hash
# https://github.com/byt3bl33d3r/CrackMapExec/blob/3c3e412193cb6d3237abe90c543e5d995bfa4447/cme/protocols/ldap.py#L927C1-L927C1
search_filter = "(&(servicePrincipalName=*)(!(objectCategory=computer)))"
attributes = ['sAMAccountName', 'servicePrincipalName']
query = connect_and_fetch(search_filter)
if query:
print("[yellow][!][/] [bright_white]Kerberoastable users from domain (excluding computers):[/]")
print("[yellow][!][/] [bright_white]If the result is blank, there is no kerberoastable users to be listed [/]")
for _dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
print(f"[green][+][/] [bright_white]{attribute}: {value}[/]")
# Path: src/modules/enum/maq_acc_quota.py
def get_maq_acc_quota() -> None:
    ''' Get the Machine Account Quota value domain-level attribute '''
search_filter = "(objectClass=*)"
query_attribute = "ms-DS-MachineAccountQuota"
query = connect_and_fetch(search_filter)
if query:
print("[yellow][!][/] Domain Machine Account Quota value:")
for _dn, attrs in query:
for attr_name in attrs:
if(attr_name == query_attribute):
for maq_account_quota_value in attrs[attr_name]:
print(f"[green][+][/] [bright_white]Machine Account Quota value: {maq_account_quota_value.decode('utf-8')} [/]")
# Path: src/modules/enum/obsolete.py
def get_obsolete() -> None:
    ''' Search for obsolete operating systems installed on computers'''
search_filter = ("(&(objectclass=computer)(!(userAccountControl:1.2.840.113556.1.4.803:=2))"
"(|(operatingSystem=*Windows 6*)(operatingSystem=*Windows 2000*)"
"(operatingSystem=*Windows XP*)(operatingSystem=*Windows Vista*)"
"(operatingSystem=*Windows 7*)(operatingSystem=*Windows 8*)"
"(operatingSystem=*Windows 8.1*)(operatingSystem=*Windows Server 2003*)"
"(operatingSystem=*Windows Server 2008*)(operatingSystem=*Windows Server 2000*)))")
attributes = ['name', 'operatingSystem', 'dNSHostName']
query = connect_and_fetch(search_filter)
if query:
print("[yellow][!][/] [bright_white]Obsolete operating systems installed on computers located:[/]")
print("[yellow][!][/] [bright_white]If the result is blank, there is no obsolete operating systems installed on computers to be listed [/]")
for dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
print(f"[green][+][/] [bright_white]{attribute}: {value}[/]")
# Path: src/modules/enum/all_computers.py
def get_computers() -> None:
    ''' Return all the computers that can be located in the environment'''
search_filter = "(&(objectClass=computer)(!(userAccountControl:1.2.840.113556.1.4.803:=2)))"
attributes = ['sAMAccountName', 'name', 'operatingSystem', 'dNSHostName']
query = connect_and_fetch(search_filter)
if query:
print("[yellow][!][/] [bright_white]All computers located:[/]")
print("[yellow][!][/] [bright_white]If the result is blank, there is no computer(s) to be listed [/]")
for _dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
print(f"[green][+][/] [bright_white]{attribute}: {value}[/]")
# Path: src/modules/enum/trusted_delegation.py
def get_trusted_delegate() -> None:
''' Retrieve all the accounts that has msds-allowedtodelegateto enabled '''
search_filter = "(&(objectClass=User)(msDS-AllowedToDelegateTo=*))"
attributes = ['sAMAccountName']
query = connect_and_fetch(search_filter)
if query:
print("[yellow][!][/] [bright_white]All accounts with msDS-AllowedToDelegateTo located:[/]")
print("[yellow][!][/] [bright_white]If the result is blank, there is no accounts with msDS-AllowedToDelegateTo rights [/]")
for _dn, values in query:
for attribute_name in values:
for attribute in attributes:
if attribute_name == attribute:
for value in values[attribute]:
value = value.decode('utf-8')
print(f"[green][+][/] [bright_white]{attribute}: {value}[/]")
# Path: src/modules/user/whoami.py
def get_user_whoami(inp) -> None:
''' Return information from a specific user from the domain (sAMAccountName, distinguishedName, Groups, UAC value and status, Last Logon and Last Logoff time)'''
username: str = inp
if len(username) == 0:
print("[red][!][/] You need to specify a username to use 'whoami' command")
return
search_filter = f'(&(objectClass=user)(sAMAccountName={username}))'
query = connect_and_fetch(search_filter)
if query:
print(f"[yellow][!][/] Whoami: [bold white]{username}[/]")
for dn, attributes in query:
for attr_name in attributes:
# Time is stored in Windows Filetime and starts in January 1, 1601 as we can see on https://learn.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-filetime?redirectedfrom=MSDN
# ^ Used to format lastLogon and lastLogoff time
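                # Worked example: FILETIME counts 100-nanosecond ticks, so a value of
                # 116444736000000000 (11,644,473,600 s * 10**7) maps to 1970-01-01,
                # while a stored 0 conventionally means the event never happened.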
attributes_list = {
'sAMAccountName': 'sAMAccountName',
'distinguishedName': 'distinguishedName',
'memberOf': 'Member of',
'lastLogon': 'Last Logon (Nanoseconds)',
'lastLogoff': 'Last Logoff' ,
'userAccountControl': 'UAC Value'
}
uac_values = {
'512': '[bold green]User is Enabled[/] - Password Expires',
'514': '[bold red]User is Disabled[/] - Password Expires',
'66048': "[bold green]User is Enabled[/] - [bold yellow]Password Never Expires[/]",
'66050': "[bold red]User is Disabled[/] - [bold yellow]Password Never Expires[/]"
}
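                # These UAC values decompose into flags: 512 = NORMAL_ACCOUNT,
                # +2 = ACCOUNTDISABLE (514 and 66050), +65536 = DONT_EXPIRE_PASSWD
                # (66048 and 66050).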
for attribute, name in attributes_list.items():
if(attr_name == attribute):
for attribute_value in attributes[attr_name]:
if "userAccountControl" in attr_name:
for uac_number, uac_context in uac_values.items():
if attribute_value.decode('utf-8') == uac_number:
console.print(f"[green][+][/] UAC Status: [bright_white]{uac_context}[/]", highlight=False)
if "lastLogon" in attr_name:
last_logon_filetime = attribute_value.decode('utf-8')
last_login_datetime = filetime_to_dt(int(last_logon_filetime))
console.print(f"[green][+][/] Last Logon: [bright_white]{last_login_datetime}[/]", highlight=False)
if "lastLogoff" in attr_name:
last_logoff_filetime = attribute_value.decode('utf-8')
                                if last_logoff_filetime == '0':
console.print(f"[green][+][/] Last Logoff: [bright_white]{last_logoff_filetime}[/]", highlight=False)
else:
last_logoff_datetime = filetime_to_dt(int(last_logoff_filetime))
console.print(f"[green][+][/] Last Logoff: [bright_white]{last_logoff_datetime}[/]", highlight=False)
else:
print(f"[green][+][/] {name}: [bright_white]{attribute_value.decode('utf-8')}[/]")
# Path: src/main.py
from cmd import Cmd
from rich import print
from datetime import datetime
from src.ui.banner import get_banner
from src.handlers.profile import create_profile_folder, load_profile
from src.helpers.user import get_current_profile
from src.modules.enum.domain_controllers import get_domain_controllers
from src.modules.enum.administrators import get_admins
from src.modules.enum.pass_not_req import get_pass_not_req
from src.modules.enum.pass_pol import get_pass_policy
from src.modules.enum.all_users import get_users
from src.modules.enum.disabled_accounts import get_disabled_users
from src.modules.enum.laps import get_laps
from src.modules.enum.kerberoasting import get_kerberoastable
from src.modules.enum.maq_acc_quota import get_maq_acc_quota
from src.modules.enum.obsolete import get_obsolete
from src.modules.enum.all_computers import get_computers
from src.modules.enum.trusted_delegation import get_trusted_delegate
from src.modules.user.whoami import get_user_whoami
import os
#!/usr/bin/python3
class BreadsPrompt(Cmd):
current_time = datetime.now().time()
current_time = current_time.strftime('%H:%M:%S')
prompt = f"{current_time} - breads # "
intro = get_banner()
def emptyline(self):
pass
| def do_exit(self, inp): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hhd-dev/hhd
# Path: src/hhd/logging.py
def set_log_plugin(plugin: str = "main"):
global _main
with _lock:
_plugins[get_ident()] = plugin
_main = plugin
# Path: src/hhd/logging.py
def setup_logger(
log_dir: str | None = None, init: bool = True, ctx: Context | None = None
):
from rich import get_console
from rich.traceback import install
if log_dir:
log_dir = expanduser(log_dir, ctx)
install()
handlers = []
handlers.append(PluginRichHandler(PluginLogRender()))
if log_dir:
os.makedirs(log_dir, exist_ok=True)
if ctx:
fix_perms(log_dir, ctx)
handler = UserRotatingFileHandler(
os.path.join(log_dir, "hhd.log"),
maxBytes=10_000_000,
backupCount=10,
ctx=ctx,
)
handler.setFormatter(
NewLineFormatter("%(asctime)s %(module)-15s %(levelname)-8s|||%(message)s")
)
handler.doRollover()
handlers.append(handler)
FORMAT = "%(message)s"
logging.basicConfig(
level=logging.INFO,
datefmt="[%H:%M]",
format=FORMAT,
handlers=handlers,
)
if init:
get_console().print(RASTER, justify="full", markup=False, highlight=False)
logger.info(f"Handheld Daemon starting...")
# Path: src/hhd/logging.py
def update_log_plugins():
for t in enumerate():
if t.ident and t.ident not in _plugins:
_plugins[t.ident] = _main
# Path: src/hhd/plugins/conf.py
class Config:
def __init__(
self, conf: Pytree | Sequence[Pytree] = [], readonly: bool = False
) -> None:
self._conf: Pytree | MutableMapping = {}
self._lock = Lock()
self._updated = False
self.readonly = readonly
self.update(conf)
self.updated = False
def update(self, conf: Pytree | Sequence[Pytree]):
with self._lock:
conf = deepcopy(conf)
if isinstance(conf, Sequence):
self._conf = parse_confs(conf, self._conf)
else:
if isinstance(self._conf, MutableMapping):
parse_conf(conf, self._conf)
else:
self._conf = conf
self.updated = True
def __eq__(self, __value: object) -> bool:
if not isinstance(__value, Config):
return False
if __value is self:
return True
with __value._lock, self._lock:
return compare_dicts(__value._conf, self._conf)
def __setitem__(self, key: str | tuple[str, ...], val):
with self._lock:
val = deepcopy(val)
seq = to_seq(key)
cont = {}
d = cont
for s in seq[:-1]:
d[s] = {}
d = d[s]
d[seq[-1]] = val
if isinstance(self._conf, MutableMapping):
parse_conf(cont, self._conf)
else:
self._conf = cont
if self._conf != cont:
self.updated = True
def __contains__(self, key: str | tuple[str, ...]):
with self._lock:
seq = to_seq(key)
d = self._conf
for s in seq:
if s not in d:
return False
d = cast(Mapping, d)[s]
return True
def __getitem__(self, key: str | tuple[str, ...]) -> "Config":
with self._lock:
assert isinstance(self._conf, MutableMapping)
seq = to_seq(key)
d = self._conf
for s in seq:
d = cast(Mapping, d)[s]
return Config([deepcopy(d)])
def __delitem__(self, key: str | tuple[str, ...]):
with self._lock:
assert isinstance(self._conf, MutableMapping)
seq = to_seq(key)
d = self._conf
for s in seq[:-1]:
d = cast(Mapping, d)[s]
del d[seq[-1]]
self.updated = True
def get(self, key, default: A) -> A:
try:
return self[key].to(type(default))
except KeyError:
return default
def to(self, t: type[A]) -> A:
return cast(t, self.conf)
def copy(self):
return Config([self.conf])
@property
def conf(self):
with self._lock:
return deepcopy(self._conf)
@property
def updated(self):
with self._lock:
return self._updated
@updated.setter
def updated(self, v: bool):
with self._lock:
self._updated = v
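# Editor's illustration (hedged sketch): minimal Config usage, mirroring patterns
# used elsewhere in this repository (string keys as in conf.get("version", None),
# tuple keys for nested paths). The key names themselves are made up.
cfg = Config([{"version": "abc123", "hhd": {"http": {"enable": True}}}])
cfg[("hhd", "http", "port")] = 8080                 # nested write via a tuple key
print(cfg.get("version", None))                     # -> "abc123"
print(("hhd", "http", "port") in cfg, cfg.updated)  # -> True True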
# Path: src/hhd/plugins/plugin.py
class Context(NamedTuple):
class SettingsEvent(TypedDict):
class ProfileEvent(TypedDict):
class ApplyEvent(TypedDict):
class ConfigEvent(TypedDict):
class InputEvent(TypedDict):
class Emitter(Protocol):
class HHDPlugin:
class HHDAutodetect(Protocol):
def __call__(self, event: Event | Sequence[Event]) -> None:
def open(
self,
emit: Emitter,
context: Context,
):
def settings(self) -> HHDSettings:
def validate(self, tags: Sequence[str], config: Any, value: Any):
def prepare(self, conf: Config):
def update(self, conf: Config):
def close(self):
def __call__(self, existing: Sequence[HHDPlugin]) -> Sequence[HHDPlugin]:
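# Editor's illustration (hedged sketch): the smallest plugin shape that satisfies the
# HHDPlugin surface outlined above. The empty settings dict is a placeholder; real
# plugins return an HHDSettings mapping, typically loaded with load_relative_yaml.
class NoopPlugin(HHDPlugin):
    def open(self, emit, context):
        self.emit = emit  # keep the emitter around to push events later

    def settings(self):
        return {}  # this plugin contributes no options

    def update(self, conf):
        pass  # react to configuration changes here

    def close(self):
        pass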
# Path: src/hhd/plugins/settings.py
class ButtonSetting(TypedDict):
class BooleanSetting(TypedDict):
class MultipleSetting(TypedDict):
class DiscreteSetting(TypedDict):
class NumericalSetting(TypedDict):
class IntegerSetting(TypedDict):
class Color(TypedDict):
class ColorSetting(TypedDict):
class DisplaySetting(TypedDict):
class CustomSetting(TypedDict):
class Container(TypedDict):
class Mode(TypedDict):
class Validator(Protocol):
STATE_HEADER = (
"\n"
+ "# Handheld Daemon State Config\n"
+ "#\n"
+ "# This file contains plugin software-only configuration that will be retained\n"
+ "# across reboots. You may edit this file in lueu of using a frontend.\n"
+ "# This header is on the bottom to make editing easier with e.g., nano.\n"
+ "#\n"
+ "# Parameters that are stored in hardware (TDP, RGB colors, etc) and\n"
+ "# risky parameters that might cause instability and should be reset\n"
+ "# across sessions are not part of this file.\n"
+ "# Use profiles to apply changes to these settings.\n"
+ "#\n"
+ "# Persisted (software) parameters are marked by having a default value.\n"
+ "# Non-persisted/hardware parameters do not have a default value.\n"
+ "#\n"
+ "# This file and comments are autogenerated. Your comments will be discarded\n"
+ "# during configuration changes. Parameters with the value `default` are\n"
+ "# ignored and are meant as a template for you to change them.\n"
+ "#\n"
+ "# - CONFIGURATION PARAMETERS\n"
+ "#"
)
PROFILE_HEADER = (
"\n"
+ "# Handheld Daemon Profile Config\n"
+ "#\n"
+ "# This file contains the configuration options that will be set when\n"
+ "# applying the profile which shares this file name.\n"
+ "# This header is on the bottom to make editing easier with e.g., nano.\n"
+ "#\n"
+ "# Settings are applied once, when applying the profile, and only the ones\n"
+ "# that are stated change. Therefore, they may drift as the system state changes\n"
+ "# (e.g., using native TDP shortcuts, or controller profile shortcuts).\n"
+ "#\n"
+ "# It is possible to set all supported parameters using profiles, and\n"
+ "# it is encouraged for you to stack profiles together.\n"
+ "#\n"
+ "# For example, you can have TDP only profiles that control the energy budget,\n"
+ "# and controller profiles that switch controller behavior.\n"
+ "# Then, depending on the game, you can apply the appropriate 2 profiles\n"
+ "# together.\n"
+ "#\n"
+ "# This file and comments are autogenerated. Your comments will be discarded\n"
+ "# during configuration changes. Parameters with the value `unset` are\n"
+ "# ignored and are meant to act as a template for you to change them.\n"
+ "#\n"
+ "# - CONFIGURATION PARAMETERS\n"
+ "#"
)
def parse(d: Setting | Container | Mode, prev: Sequence[str], out: MutableMapping):
def parse_defaults(sets: HHDSettings):
def fill_in_defaults(s: Setting | Container | Mode):
def merge_reduce(
a: Setting | Container | Mode, b: Setting | Container | Mode
) -> Setting | Container | Mode:
def merge_reduce_sec(a: Section, b: Section):
def merge_reduce_secs(a: HHDSettings, b: HHDSettings):
def merge_settings(sets: Sequence[HHDSettings]):
def generate_desc(s: Setting | Container | Mode):
def traverse_desc(set: Setting | Container | Mode, prev: Sequence[str]):
def tranverse_desc_sec(set: HHDSettings):
def dump_comment(set: HHDSettings, header: str = STATE_HEADER):
def dump_setting(
set: Container | Mode,
prev: Sequence[str],
conf: Config,
unmark: Literal["unset", "default"] = "default",
):
def merge_dicts(a: Mapping | Any, b: Mapping | Any):
def dump_settings(
set: HHDSettings, conf: Config, unmark: Literal["unset", "default"] = "default"
):
def save_state_yaml(fn: str, set: HHDSettings, conf: Config, shash=None):
def save_blacklist_yaml(fn: str, avail: Sequence[str], blacklist: Sequence[str]):
def load_blacklist_yaml(fn: str):
def save_profile_yaml(
fn: str, set: HHDSettings, conf: Config | None = None, shash=None
):
def strip_defaults(c):
def get_default_state(set: HHDSettings):
def load_state_yaml(fn: str, set: HHDSettings):
def load_profile_yaml(fn: str):
def get_settings_hash(set: HHDSettings):
def unravel(d: Setting | Container | Mode, prev: Sequence[str], out: MutableMapping):
def unravel_options(settings: HHDSettings):
def __call__(self, tags: Sequence[str], config: Any, value: Any) -> bool:
def validate_config(
conf: Config, settings: HHDSettings, validator: Validator, use_defaults: bool = True
):
# Path: src/hhd/plugins/utils.py
def load_relative_yaml(fn: str):
"""Returns the yaml data of a file in the relative dir provided."""
import inspect
import os
import yaml
script_fn = inspect.currentframe().f_back.f_globals["__file__"] # type: ignore
dirname = os.path.dirname(script_fn)
with open(os.path.join(dirname, fn), "r") as f:
return yaml.safe_load(f)
# Path: src/hhd/plugins/settings.py
class Validator(Protocol):
def __call__(self, tags: Sequence[str], config: Any, value: Any) -> bool:
return False
# Path: src/hhd/plugins/settings.py
def get_default_state(set: HHDSettings):
return Config(parse_defaults(set))
# Path: src/hhd/plugins/settings.py
def load_blacklist_yaml(fn: str):
import yaml
try:
with open(fn, "r") as f:
return yaml.safe_load(f)["blacklist"]
except Exception as e:
logger.warning(f"Plugin blacklist not found, using default (empty).")
return ["myplugin1"]
# Path: src/hhd/plugins/settings.py
def load_profile_yaml(fn: str):
import yaml
try:
with open(fn, "r") as f:
state = cast(Mapping, strip_defaults(yaml.safe_load(f)) or {})
except FileNotFoundError:
logger.warning(
f"Profile file not found, using defaults. Searched location:\n{fn}"
)
return None
except yaml.YAMLError:
logger.warning(
f"Profile file is invalid, skipping loading. Searched location:\n{fn}"
)
return None
return Config([state])
# Path: src/hhd/plugins/settings.py
def load_state_yaml(fn: str, set: HHDSettings):
import yaml
defaults = parse_defaults(set)
try:
with open(fn, "r") as f:
state = cast(Mapping, strip_defaults(yaml.safe_load(f)) or {})
except FileNotFoundError:
logger.warning(f"State file not found. Searched location:\n{fn}")
return None
except yaml.YAMLError:
logger.warning(f"State file is invalid. Searched location:\n{fn}")
return None
return Config([defaults, state])
# Path: src/hhd/plugins/settings.py
def merge_settings(sets: Sequence[HHDSettings]):
if not sets:
return {}
if len(sets) > 1:
return reduce(merge_reduce_secs, sets)
return merge_reduce_secs({}, sets[0])
# Path: src/hhd/plugins/settings.py
def save_blacklist_yaml(fn: str, avail: Sequence[str], blacklist: Sequence[str]):
import yaml
with open(fn, "w") as f:
f.write(
(
""
+ "# \n"
+ "# Plugin blacklist\n"
+ "# The plugin providers under blacklist will not run.\n"
+ "# \n"
+ "# Warning: this file is read only on startup.\n"
+ "# `sudo systemctl restart hhd@$(whoami)`\n"
+ "# \n"
+ "# Available providers:\n"
+ f"# [{', '.join(avail)}]\n\n"
)
)
yaml.safe_dump({"blacklist": blacklist}, f, width=85, sort_keys=False)
return True
# Path: src/hhd/plugins/settings.py
def save_profile_yaml(
fn: str, set: HHDSettings, conf: Config | None = None, shash=None
):
import yaml
if shash is None:
shash = get_settings_hash(set)
if conf is None:
conf = Config({})
elif conf.get("version", None) == shash and not conf.updated:
return False
conf["version"] = shash
with open(fn, "w") as f:
yaml.safe_dump(dump_settings(set, conf, "unset"), f, width=85, sort_keys=False)
f.write("\n")
f.write(dump_comment(set, PROFILE_HEADER))
return True
# Path: src/hhd/plugins/settings.py
def get_settings_hash(set: HHDSettings):
import hashlib
return hashlib.md5(dump_comment(set).encode()).hexdigest()[:8]
# Path: src/hhd/plugins/settings.py
def save_state_yaml(fn: str, set: HHDSettings, conf: Config, shash=None):
import yaml
if shash is None:
shash = get_settings_hash(set)
if conf.get("version", None) == shash and not conf.updated:
return False
conf["version"] = shash
with open(fn, "w") as f:
yaml.safe_dump(
dump_settings(set, conf, "default"), f, sort_keys=False
)
f.write("\n")
f.write(dump_comment(set, STATE_HEADER))
return True
# Path: src/hhd/plugins/settings.py
def validate_config(
conf: Config, settings: HHDSettings, validator: Validator, use_defaults: bool = True
):
options = unravel_options(settings)
for k, d in options.items():
v = conf.get(k, None)
if d["type"] == "action":
default = False
else:
default = d["default"]
if v is None:
if use_defaults and default is not None:
conf[k] = default
continue
match d["type"]:
case "mode":
if v not in d["modes"]:
if use_defaults:
conf[k] = default
else:
del conf[k]
case "bool" | "action":
if v not in (False, True):
conf[k] = bool(v)
case "multiple" | "discrete":
if v not in d["options"]:
if use_defaults:
conf[k] = default
else:
del conf[k]
case "integer":
if not isinstance(v, int):
conf[k] = int(v)
if v < d["min"]:
conf[k] = d["min"]
if v > d["max"]:
conf[k] = d["max"]
case "float":
if not isinstance(v, float):
conf[k] = float(v)
if v < d["min"]:
conf[k] = d["min"]
if v > d["max"]:
conf[k] = d["max"]
case "color":
invalid = False
if not isinstance(v, Mapping):
invalid = True
else:
for c in ("red", "green", "blue"):
if c not in v:
invalid = True
elif not (0 <= v[c] < 256):
invalid = True
if invalid:
if use_defaults:
conf[k] = default
else:
del conf[k]
case "custom":
if not validator(d["tags"], d["config"], v):
if use_defaults:
conf[k] = default
else:
del conf[k]
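# Worked example for validate_config: given an "integer" option with min=0, max=30
# and default=15, a stored value of 100 is clamped to 30, a string like "7" is
# coerced via int(), and a missing value is filled with 15 when use_defaults is True.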
# Path: src/hhd/utils.py
def expanduser(path: str, user: int | str | Context | None = None):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing.
Modified from the python implementation to support using the target userid/user."""
path = os.fspath(path)
if not path.startswith("~"):
return path
i = path.find("/", 1)
if i < 0:
i = len(path)
if i == 1:
if "HOME" in os.environ and not user:
# Fallback to environ only if user not set
userhome = os.environ["HOME"]
else:
try:
import pwd
except ImportError:
# pwd module unavailable, return path unchanged
return path
try:
if not user:
userhome = pwd.getpwuid(os.getuid()).pw_dir
elif isinstance(user, int):
userhome = pwd.getpwuid(user).pw_dir
elif isinstance(user, Context):
userhome = pwd.getpwuid(user.euid).pw_dir
else:
userhome = pwd.getpwnam(user).pw_dir
except KeyError:
# bpo-10496: if the current user identifier doesn't exist in the
# password database, return the path unchanged
return path
else:
try:
import pwd
except ImportError:
# pwd module unavailable, return path unchanged
return path
name = path[1:i]
try:
pwent = pwd.getpwnam(name)
except KeyError:
# bpo-10496: if the user name from the path doesn't exist in the
# password database, return the path unchanged
return path
userhome = pwent.pw_dir
root = "/"
userhome = userhome.rstrip(root)
return (userhome + path[i:]) or root
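# Usage sketch (mirrors the calls made in __main__.py below): expanduser("~/.config/hhd", ctx)
# resolves "~" against ctx.euid's home directory instead of $HOME, while
# expanduser("~gamer/.config/hhd") looks up the literal account name via pwd
# ("gamer" being a hypothetical user).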
# Path: src/hhd/utils.py
def fix_perms(fn: str, ctx: Context):
os.chown(fn, ctx.euid, ctx.egid)
# Path: src/hhd/utils.py
def get_context(user: str | None) -> Context | None:
try:
uid = os.getuid()
gid = os.getgid()
if not user:
if not uid:
print(f"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(
"Running as root without a specified user (`--user`). Configs will be placed at `/root/.config`."
)
print(f"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
return Context(uid, gid, uid, gid, getpass.getuser())
euid = int(
subprocess.run(
["id", "-u", user], capture_output=True, check=True
).stdout.decode()
)
egid = int(
subprocess.run(
["id", "-g", user], capture_output=True, check=True
).stdout.decode()
)
if (uid or gid) and (uid != euid or gid != egid):
print(
f"The user specified with --user is not the user this process was started with."
)
return None
return Context(euid, egid, uid, gid, user)
except subprocess.CalledProcessError as e:
print(f"Getting the user uid/gid returned an error:\n{e.stderr.decode()}")
return None
except Exception as e:
print(f"Failed getting permissions with error:\n{e}")
return None
# Path: src/hhd/utils.py
def switch_priviledge(p: Context, escalate=False):
uid = os.geteuid()
gid = os.getegid()
if escalate:
os.seteuid(p.uid)
os.setegid(p.gid)
else:
os.setegid(p.egid)
os.seteuid(p.euid)
return uid, gid
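# Editor's note: when dropping privileges the effective gid is changed before the
# effective uid, because once the euid is unprivileged the process may no longer be
# allowed to change its gid; escalation applies the two calls in the opposite order.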
# Path: src/hhd/__main__.py
import argparse
import fcntl
import logging
import os
import signal
import subprocess
import sys
import pkg_resources
import hashlib
import random
from os.path import join
from threading import Condition
from threading import Event as TEvent
from threading import RLock
from time import sleep
from typing import Sequence
from .logging import set_log_plugin, setup_logger, update_log_plugins
from .plugins import (
Config,
Emitter,
Event,
HHDAutodetect,
HHDPlugin,
HHDSettings,
load_relative_yaml,
)
from .plugins.settings import (
Validator,
get_default_state,
load_blacklist_yaml,
load_profile_yaml,
load_state_yaml,
merge_settings,
save_blacklist_yaml,
save_profile_yaml,
get_settings_hash,
save_state_yaml,
validate_config,
)
from .utils import expanduser, fix_perms, get_context, switch_priviledge
from importlib.metadata import version
from .http import HHDHTTPServer
class EmitHolder(Emitter):
def __init__(self, condition: Condition) -> None:
self._events = []
self._condition = condition
def __call__(self, event: Event | Sequence[Event]) -> None:
with self._condition:
if isinstance(event, Sequence):
self._events.extend(event)
else:
self._events.append(event)
self._condition.notify_all()
def get_events(self, timeout: int = -1) -> Sequence[Event]:
with self._condition:
if not self._events and timeout != -1:
self._condition.wait()
ev = self._events
self._events = []
return ev
def has_events(self):
with self._condition:
return bool(self._events)
def notifier(ev: TEvent, cond: Condition):
def _inner(sig, frame):
with cond:
ev.set()
cond.notify_all()
return _inner
def print_token(ctx):
token_fn = expanduser(join(CONFIG_DIR, "token"), ctx)
try:
with open(token_fn, "r") as f:
token = f.read().strip()
logger.info(f'Current HHD token (for user "{ctx.name}") is: "{token}"')
except Exception as e:
logger.error(f"Token not found or could not be read, error:\n{e}")
logger.info(
"Enable the http endpoint to generate a token automatically.\n"
+ "Or place it under '~/.config/hhd/token' manually.\n"
+ "'chown 600 ~/.config/hhd/token' for security reasons!"
)
def main():
parser = argparse.ArgumentParser(
prog="HHD: Handheld Daemon main interface.",
description="Handheld Daemon is a daemon for managing the quirks inherent in handheld devices.",
)
parser.add_argument(
"-u",
"--user",
default=None,
help="The user whose home directory will be used to store the files (~/.config/hhd).",
dest="user",
)
parser.add_argument(
"command",
nargs="*",
default=[],
help="The command to run. If empty, run as daemon. Right now, only the command token is supported.",
)
args = parser.parse_args()
user = args.user
# Setup temporary logger for permission retrieval
ctx = get_context(user)
if not ctx:
print(f"Could not get user information. Exiting...")
return
detectors: dict[str, HHDAutodetect] = {}
plugins: dict[str, Sequence[HHDPlugin]] = {}
cfg_fds = []
# HTTP data
https = None
prev_http_cfg = None
updated = False
# Check we are in a virtual environment
# TODO: Improve
exe_python = sys.executable
try:
# Create nested hhd dir
# This might mess up permissions in upward directories
# So try to deescalate
hhd_dir = expanduser(CONFIG_DIR, ctx)
try:
switch_priviledge(ctx, False)
os.makedirs(hhd_dir, exist_ok=True)
switch_priviledge(ctx, True)
fix_perms(hhd_dir, ctx)
except Exception:
pass
# Remove old dir
try:
os.rename(
join(hhd_dir, "plugins"), join(hhd_dir, "plugins_old_USE_STATEYML")
)
except Exception:
pass
set_log_plugin("main")
setup_logger(join(CONFIG_DIR, "log"), ctx=ctx)
if args.command:
if args.command[0] == "token":
print_token(ctx)
return
| else: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: IDSIA/automated-cl
# Path: torchmeta_local/utils/data/dataset.py
class ClassDataset(object):
"""Base class for a dataset of classes. Each item from a `ClassDataset` is
a dataset containing examples from the same class.
Parameters
----------
meta_train : bool (default: `False`)
Use the meta-train split of the dataset. If set to `True`, then the
arguments `meta_val` and `meta_test` must be set to `False`. Exactly one
of these three arguments must be set to `True`.
meta_val : bool (default: `False`)
Use the meta-validation split of the dataset. If set to `True`, then the
arguments `meta_train` and `meta_test` must be set to `False`. Exactly one
of these three arguments must be set to `True`.
meta_test : bool (default: `False`)
Use the meta-test split of the dataset. If set to `True`, then the
arguments `meta_train` and `meta_val` must be set to `False`. Exactly one
of these three arguments must be set to `True`.
meta_split : string in {'train', 'val', 'test'}, optional
Name of the split to use. This overrides the arguments `meta_train`,
`meta_val` and `meta_test`.
class_augmentations : list of callable, optional
A list of functions that augment the dataset with new classes. These classes
are transformations of existing classes. E.g. `transforms.HorizontalFlip()`.
"""
def __init__(self, meta_train=False, meta_val=False, meta_test=False,
meta_split=None, class_augmentations=None):
if meta_train + meta_val + meta_test == 0:
if meta_split is None:
raise ValueError('The meta-split is undefined. Use either the '
'argument `meta_train=True` (or `meta_val`/`meta_test`), or '
'the argument `meta_split="train"` (or "val"/"test").')
elif meta_split not in ['train', 'val', 'test']:
raise ValueError('Unknown meta-split name `{0}`. The meta-split '
'must be in [`train`, `val`, `test`].'.format(meta_split))
meta_train = (meta_split == 'train')
meta_val = (meta_split == 'val')
meta_test = (meta_split == 'test')
elif meta_train + meta_val + meta_test > 1:
raise ValueError('Multiple arguments among `meta_train`, `meta_val` '
'and `meta_test` are set to `True`. Exactly one must be set to '
'`True`.')
self.meta_train = meta_train
self.meta_val = meta_val
self.meta_test = meta_test
self._meta_split = meta_split
if class_augmentations is not None:
if not isinstance(class_augmentations, list):
raise TypeError('Unknown type for `class_augmentations`. '
'Expected `list`, got `{0}`.'.format(type(class_augmentations)))
unique_augmentations = OrderedSet()
for augmentations in class_augmentations:
for transform in augmentations:
if transform in unique_augmentations:
warnings.warn('The class augmentation `{0}` already '
'exists in the list of class augmentations (`{1}`). '
'To avoid any duplicate, this transformation is '
'ignored.'.format(transform, repr(transform)),
UserWarning, stacklevel=2)
unique_augmentations.add(transform)
class_augmentations = list(unique_augmentations)
else:
class_augmentations = []
self.class_augmentations = class_augmentations
def get_class_augmentation(self, index):
transform_index = (index // self.num_classes) - 1
if transform_index < 0:
return None
return self.class_augmentations[transform_index]
def get_transform(self, index, transform=None):
class_transform = self.get_class_augmentation(index)
if class_transform is None:
return transform
if transform is None:
return class_transform
return Compose([class_transform, transform])
def get_target_transform(self, index):
class_transform = self.get_class_augmentation(index)
return FixedCategory(class_transform)
@property
def meta_split(self):
if self._meta_split is None:
if self.meta_train:
self._meta_split = 'train'
elif self.meta_val:
self._meta_split = 'val'
elif self.meta_test:
self._meta_split = 'test'
else:
raise NotImplementedError()
return self._meta_split
def __getitem__(self, index):
raise NotImplementedError()
@property
def num_classes(self):
raise NotImplementedError()
def __len__(self):
return self.num_classes * (len(self.class_augmentations) + 1)
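# Editor's illustration (hedged sketch, not part of torchmeta): the members a concrete
# ClassDataset must provide. Real implementations return a task-level Dataset from
# __getitem__; this toy version returns the raw per-class array instead.
class InMemoryClassDataset(ClassDataset):
    def __init__(self, arrays_per_class, meta_train=True):
        super().__init__(meta_train=meta_train)
        self.arrays_per_class = arrays_per_class  # one array of examples per class

    @property
    def num_classes(self):
        return len(self.arrays_per_class)

    def __getitem__(self, index):
        # index may exceed num_classes when class augmentations are configured,
        # hence the modulo, matching get_class_augmentation above.
        return self.arrays_per_class[index % self.num_classes]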
# Path: torchmeta_local/utils/data/dataset.py
class CombinationMetaDataset(MetaDataset):
"""Base class for a meta-dataset, where the classification tasks are over
multiple classes from a `ClassDataset`.
Parameters
----------
dataset : `ClassDataset` instance
A dataset of classes. Each item of `dataset` is a dataset, containing
all the examples from the same class.
num_classes_per_task : int
Number of classes per tasks. This corresponds to `N` in `N-way`
classification.
target_transform : callable, optional
A function/transform that takes a target, and returns a transformed
version. See also `torchvision.transforms`.
dataset_transform : callable, optional
A function/transform that takes a dataset (ie. a task), and returns a
transformed version of it. E.g. `transforms.ClassSplitter()`.
"""
def __init__(self, dataset, num_classes_per_task, target_transform=None,
dataset_transform=None):
if not isinstance(num_classes_per_task, int):
raise TypeError('Unknown type for `num_classes_per_task`. Expected '
'`int`, got `{0}`.'.format(type(num_classes_per_task)))
self.dataset = dataset
self.num_classes_per_task = num_classes_per_task
# If no target_transform, then use a default target transform that
# is well behaved for the `default_collate` function (assign class
        # augmentations to integers).
if target_transform is None:
target_transform = DefaultTargetTransform(dataset.class_augmentations)
super(CombinationMetaDataset, self).__init__(meta_train=dataset.meta_train,
meta_val=dataset.meta_val, meta_test=dataset.meta_test,
meta_split=dataset.meta_split, target_transform=target_transform,
dataset_transform=dataset_transform)
def __iter__(self):
num_classes = len(self.dataset)
for index in combinations(num_classes, self.num_classes_per_task):
yield self[index]
def sample_task(self):
index = self.np_random.choice(len(self.dataset),
size=self.num_classes_per_task, replace=False)
return self[tuple(index)]
def __getitem__(self, index):
if isinstance(index, int):
raise ValueError('The index of a `CombinationMetaDataset` must be '
'a tuple of integers, and not an integer. For example, call '
'`dataset[({0})]` to get a task with classes from 0 to {1} '
'(got `{2}`).'.format(', '.join([str(idx)
for idx in range(self.num_classes_per_task)]),
self.num_classes_per_task - 1, index))
assert len(index) == self.num_classes_per_task
datasets = [self.dataset[i] for i in index]
# Use deepcopy on `Categorical` target transforms, to avoid any side
# effect across tasks.
task = ConcatTask(datasets, self.num_classes_per_task,
target_transform=wrap_transform(self.target_transform,
self._copy_categorical, transform_type=Categorical))
if self.dataset_transform is not None:
task = self.dataset_transform(task)
return task
def _copy_categorical(self, transform):
assert isinstance(transform, Categorical)
transform.reset()
if transform.num_classes is None:
transform.num_classes = self.num_classes_per_task
return deepcopy(transform)
def __len__(self):
num_classes, length = len(self.dataset), 1
for i in range(1, self.num_classes_per_task + 1):
length *= (num_classes - i + 1) / i
if length > sys.maxsize:
warnings.warn('The number of possible tasks in {0} is '
'combinatorially large (equal to C({1}, {2})), and exceeds '
'machine precision. Setting the length of the dataset to the '
'maximum integer value, which undervalues the actual number of '
'possible tasks in the dataset. Therefore the value returned by '
'`len(dataset)` should not be trusted as being representative '
'of the true number of tasks.'.format(self, len(self.dataset),
self.num_classes_per_task), UserWarning, stacklevel=2)
length = sys.maxsize
return int(length)
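# Worked example: a ClassDataset with 20 classes wrapped in a 5-way
# CombinationMetaDataset gives len(meta_dataset) == C(20, 5) == 15504 candidate tasks,
# and meta_dataset[(0, 3, 7, 11, 19)] concatenates exactly those five classes.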
# Path: torchmeta_local/utils/data/task.py
class Dataset(Dataset_):
def __init__(self, index, transform=None, target_transform=None):
self.index = index
self.transform = transform
self.target_transform = target_transform
def target_transform_append(self, transform):
if transform is None:
return
if self.target_transform is None:
self.target_transform = transform
else:
self.target_transform = Compose([self.target_transform, transform])
def __hash__(self):
return hash(self.index)
# Path: torchmeta_local/datasets/utils.py
def get_asset(*args, dtype=None):
filename = get_asset_path(*args)
if not os.path.isfile(filename):
raise IOError('{} not found'.format(filename))
if dtype is None:
_, dtype = os.path.splitext(filename)
dtype = dtype[1:]
if dtype == 'json':
with open(filename, 'r') as f:
data = json.load(f)
else:
raise NotImplementedError()
return data
# Path: torchmeta_local/datasets/letter.py
import numpy as np
import os
import json
import h5py
from tqdm import tqdm
from torchmeta_local.utils.data import Dataset, ClassDataset, CombinationMetaDataset
from torchmeta_local.datasets.utils import get_asset
from sklearn.datasets import fetch_openml
from sklearn.datasets import fetch_openml
def __getitem__(self, index):
label = self.labels[index % self.num_classes]
data = self.data[label]
transform = self.get_transform(index, self.transform)
target_transform = self.get_target_transform(index)
return LetterDataset(index, data, label, transform=transform, target_transform=target_transform)
@property
def num_classes(self):
return self._num_classes
@property
def data(self):
if self._data is None:
self._data_file = h5py.File(self.split_filename, 'r')
self._data = self._data_file['datasets']
return self._data
@property
def labels(self):
if self._labels is None:
with open(self.split_filename_labels, 'r') as f:
self._labels = json.load(f)
return self._labels
def _check_integrity(self):
return (os.path.isfile(self.split_filename)
and os.path.isfile(self.split_filename_labels))
def close(self):
if self._data is not None:
self._data.close()
self._data = None
def download(self):
if self._check_integrity():
return
data = fetch_openml(data_id=self.open_ml_id)
features = data.data
targets = data.target
os.makedirs(self.root, exist_ok=True)
# for each meta-data-split, get the labels, then check which data-point belongs to the set (via a mask).
# then, retrieve the features and targets belonging to the set. Then create hdf5 file for these features.
for s, split in enumerate(['train', 'val', 'test']):
labels_assets_split = get_asset(self.folder, '{0}.json'.format(split))
is_in_split = [t in labels_assets_split for t in targets]
features_split = features.loc[is_in_split]
targets_split = targets.loc[is_in_split]
assert targets_split.shape[0] == features_split.shape[0]
unique_targets_split = np.unique(targets_split)
if len(labels_assets_split) > unique_targets_split.shape[0]:
print(f"unique set of labels ({(unique_targets_split.shape[0])}) is smaller than set of labels "
f"given by assets ({len(labels_assets_split)}). Proceeding with unique set of labels.")
# write unique targets to json file.
labels_filename = os.path.join(self.root, self.filename_labels.format(split))
with open(labels_filename, 'w') as f:
json.dump(unique_targets_split.tolist(), f)
# write data (features and class labels)
filename = os.path.join(self.root, self.filename.format(split))
with h5py.File(filename, 'w') as f:
group = f.create_group('datasets')
for i, label in enumerate(tqdm(unique_targets_split, desc=filename)):
data_class = features_split.loc[targets_split == label]
group.create_dataset(label, data=data_class)
class LetterDataset(Dataset):
def __init__(self, index, data, label, transform=None, target_transform=None):
super(LetterDataset, self).__init__(index, transform=transform, target_transform=target_transform)
self.data = data
self.label = label
def __len__(self):
return len(self.data)
def __getitem__(self, index):
features = self.data[index, :]
target = self.label
if self.transform is not None:
features = self.transform(features)
if self.target_transform is not None:
target = self.target_transform(target)
return features, target
def create_asset(root='data', num_split=None, numpy_seed=42):
"""This methods creates the assets of the letter dataset. These are the meta-dataset splits from the
original data. Only run this method in case you want to create new assets. Once created, copy the assets to
this directory: torchmeta_local.datasets.assets.letter. You can also manually change the assets."""
# number of classes per split: train, valid, test (26 classes in total)
if num_split is None:
num_split = {"train": 15, "val": 5, "test": 6}
num_classes = 0
for key in num_split:
num_classes += num_split[key]
data = fetch_openml(data_id=LetterClassDataset.open_ml_id)
unique_targets = np.unique(data.target)
num_unique_targets = len(unique_targets)
assert num_classes == num_unique_targets
# split unique labels randomly
np.random.seed(numpy_seed)
| perm = np.random.permutation(num_unique_targets) |
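For reference, here is a minimal, self-contained sketch of how the permuted indices above could be sliced into train/val/test label lists, mirroring the `num_split` convention in `create_asset`; the 26 single-letter labels and the seed are assumptions for illustration, not values taken from the repository.
import numpy as np
# Hypothetical illustration: slice a permutation of 26 letter classes into
# meta-train/val/test label lists, mirroring the num_split convention above.
num_split = {"train": 15, "val": 5, "test": 6}
unique_targets = np.array([chr(ord('a') + i) for i in range(26)])  # assumed labels
np.random.seed(42)
perm = np.random.permutation(len(unique_targets))
splits, start = {}, 0
for name, count in num_split.items():
    # take the next `count` permuted classes for this split
    splits[name] = unique_targets[perm[start:start + count]].tolist()
    start += count
assert sum(len(v) for v in splits.values()) == len(unique_targets)
print({k: len(v) for k, v in splits.items()})  # {'train': 15, 'val': 5, 'test': 6}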
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yamy-cheng/DMAOT-VOTS2023
# Path: dmaot/networks/encoders/resnest/resnet.py
class ResNet(nn.Module):
"""ResNet Variants
Parameters
----------
block : Block
Class for the residual block. Options are BasicBlockV1, BottleneckV1.
layers : list of int
Numbers of layers in each block
classes : int, default 1000
Number of classification classes.
dilated : bool, default False
Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
typically used in Semantic Segmentation.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
for Synchronized Cross-GPU BachNormalization).
Reference:
- He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
- Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
"""
# pylint: disable=unused-variable
def __init__(self,
block,
layers,
radix=1,
groups=1,
bottleneck_width=64,
num_classes=1000,
dilated=False,
dilation=1,
deep_stem=False,
stem_width=64,
avg_down=False,
rectified_conv=False,
rectify_avg=False,
avd=False,
avd_first=False,
final_drop=0.0,
dropblock_prob=0,
last_gamma=False,
norm_layer=nn.BatchNorm2d,
freeze_at=0):
self.cardinality = groups
self.bottleneck_width = bottleneck_width
# ResNet-D params
self.inplanes = stem_width * 2 if deep_stem else 64
self.avg_down = avg_down
self.last_gamma = last_gamma
# ResNeSt params
self.radix = radix
self.avd = avd
self.avd_first = avd_first
super(ResNet, self).__init__()
self.rectified_conv = rectified_conv
self.rectify_avg = rectify_avg
if rectified_conv:
from rfconv import RFConv2d
conv_layer = RFConv2d
else:
conv_layer = nn.Conv2d
conv_kwargs = {'average_mode': rectify_avg} if rectified_conv else {}
if deep_stem:
self.conv1 = nn.Sequential(
conv_layer(3,
stem_width,
kernel_size=3,
stride=2,
padding=1,
bias=False,
**conv_kwargs),
norm_layer(stem_width),
nn.ReLU(inplace=True),
conv_layer(stem_width,
stem_width,
kernel_size=3,
stride=1,
padding=1,
bias=False,
**conv_kwargs),
norm_layer(stem_width),
nn.ReLU(inplace=True),
conv_layer(stem_width,
stem_width * 2,
kernel_size=3,
stride=1,
padding=1,
bias=False,
**conv_kwargs),
)
else:
self.conv1 = conv_layer(3,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False,
**conv_kwargs)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block,
64,
layers[0],
norm_layer=norm_layer,
is_first=False)
self.layer2 = self._make_layer(block,
128,
layers[1],
stride=2,
norm_layer=norm_layer)
if dilated or dilation == 4:
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=1,
dilation=2,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
elif dilation == 2:
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
dilation=1,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
else:
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.stem = [self.conv1, self.bn1]
self.stages = [self.layer1, self.layer2, self.layer3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, norm_layer):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.freeze(freeze_at)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_layer=None,
dropblock_prob=0.0,
is_first=True):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(
nn.AvgPool2d(kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
else:
down_layers.append(
nn.AvgPool2d(kernel_size=1,
stride=1,
ceil_mode=True,
count_include_pad=False))
down_layers.append(
nn.Conv2d(self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=1,
bias=False))
else:
down_layers.append(
nn.Conv2d(self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False))
down_layers.append(norm_layer(planes * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(
block(self.inplanes,
planes,
stride,
downsample=downsample,
radix=self.radix,
cardinality=self.cardinality,
bottleneck_width=self.bottleneck_width,
avd=self.avd,
avd_first=self.avd_first,
dilation=1,
is_first=is_first,
rectified_conv=self.rectified_conv,
rectify_avg=self.rectify_avg,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob,
last_gamma=self.last_gamma))
elif dilation == 4:
layers.append(
block(self.inplanes,
planes,
stride,
downsample=downsample,
radix=self.radix,
cardinality=self.cardinality,
bottleneck_width=self.bottleneck_width,
avd=self.avd,
avd_first=self.avd_first,
dilation=2,
is_first=is_first,
rectified_conv=self.rectified_conv,
rectify_avg=self.rectify_avg,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob,
last_gamma=self.last_gamma))
else:
raise RuntimeError("=> unknown dilation size: {}".format(dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
radix=self.radix,
cardinality=self.cardinality,
bottleneck_width=self.bottleneck_width,
avd=self.avd,
avd_first=self.avd_first,
dilation=dilation,
rectified_conv=self.rectified_conv,
rectify_avg=self.rectify_avg,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob,
last_gamma=self.last_gamma))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
xs = []
x = self.layer1(x)
xs.append(x) # 4X
x = self.layer2(x)
xs.append(x) # 8X
x = self.layer3(x)
xs.append(x) # 16X
# Following STMVOS, we drop stage 5.
xs.append(x) # 16X
return xs
def freeze(self, freeze_at):
if freeze_at >= 1:
for m in self.stem:
freeze_params(m)
for idx, stage in enumerate(self.stages, start=2):
if freeze_at >= idx:
freeze_params(stage)
# Path: dmaot/networks/encoders/resnest/resnet.py
class Bottleneck(nn.Module):
"""ResNet Bottleneck
"""
# pylint: disable=unused-argument
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
downsample=None,
radix=1,
cardinality=1,
bottleneck_width=64,
avd=False,
avd_first=False,
dilation=1,
is_first=False,
rectified_conv=False,
rectify_avg=False,
norm_layer=None,
dropblock_prob=0.0,
last_gamma=False):
super(Bottleneck, self).__init__()
group_width = int(planes * (bottleneck_width / 64.)) * cardinality
self.conv1 = nn.Conv2d(inplanes,
group_width,
kernel_size=1,
bias=False)
self.bn1 = norm_layer(group_width)
self.dropblock_prob = dropblock_prob
self.radix = radix
self.avd = avd and (stride > 1 or is_first)
self.avd_first = avd_first
if self.avd:
self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
stride = 1
if dropblock_prob > 0.0:
self.dropblock1 = DropBlock2D(dropblock_prob, 3)
if radix == 1:
self.dropblock2 = DropBlock2D(dropblock_prob, 3)
self.dropblock3 = DropBlock2D(dropblock_prob, 3)
if radix >= 1:
self.conv2 = SplAtConv2d(group_width,
group_width,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
groups=cardinality,
bias=False,
radix=radix,
rectify=rectified_conv,
rectify_avg=rectify_avg,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
elif rectified_conv:
from rfconv import RFConv2d
self.conv2 = RFConv2d(group_width,
group_width,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
groups=cardinality,
bias=False,
average_mode=rectify_avg)
self.bn2 = norm_layer(group_width)
else:
self.conv2 = nn.Conv2d(group_width,
group_width,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
groups=cardinality,
bias=False)
self.bn2 = norm_layer(group_width)
self.conv3 = nn.Conv2d(group_width,
planes * 4,
kernel_size=1,
bias=False)
self.bn3 = norm_layer(planes * 4)
if last_gamma:
from torch.nn.init import zeros_
zeros_(self.bn3.weight)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.dilation = dilation
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
if self.dropblock_prob > 0.0:
out = self.dropblock1(out)
out = self.relu(out)
if self.avd and self.avd_first:
out = self.avd_layer(out)
out = self.conv2(out)
if self.radix == 0:
out = self.bn2(out)
if self.dropblock_prob > 0.0:
out = self.dropblock2(out)
out = self.relu(out)
if self.avd and not self.avd_first:
out = self.avd_layer(out)
out = self.conv3(out)
out = self.bn3(out)
if self.dropblock_prob > 0.0:
out = self.dropblock3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# Path: dmaot/networks/encoders/resnest/resnest.py
import torch
from .resnet import ResNet, Bottleneck
__all__ = ['resnest50', 'resnest101', 'resnest200', 'resnest269']
_url_format = 'https://s3.us-west-1.wasabisys.com/resnest/torch/{}-{}.pth'
_model_sha256 = {
name: checksum
for checksum, name in [
('528c19ca', 'resnest50'),
('22405ba7', 'resnest101'),
('75117900', 'resnest200'),
('0cc87c48', 'resnest269'),
]
}
def short_hash(name):
if name not in _model_sha256:
raise ValueError(
'Pretrained model for {name} is not available.'.format(name=name))
return _model_sha256[name][:8]
resnest_model_urls = {
name: _url_format.format(name, short_hash(name))
for name in _model_sha256.keys()
}
def resnest50(pretrained=False, root='~/.encoding/models', **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=2,
groups=1,
bottleneck_width=64,
deep_stem=True,
stem_width=32,
avg_down=True,
avd=True,
avd_first=False,
**kwargs)
if pretrained:
model.load_state_dict(
torch.hub.load_state_dict_from_url(resnest_model_urls['resnest50'],
progress=True,
check_hash=True))
return model
def resnest101(pretrained=False, root='~/.encoding/models', **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3],
radix=2,
groups=1,
bottleneck_width=64,
deep_stem=True,
stem_width=64,
avg_down=True,
avd=True,
avd_first=False,
**kwargs)
if pretrained:
model.load_state_dict(
torch.hub.load_state_dict_from_url(
resnest_model_urls['resnest101'],
progress=True,
check_hash=True))
return model
def resnest200(pretrained=False, root='~/.encoding/models', **kwargs):
model = ResNet(Bottleneck, [3, 24, 36, 3],
radix=2,
groups=1,
bottleneck_width=64,
deep_stem=True,
stem_width=64,
avg_down=True,
| avd=True, |
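As a quick illustration of the checkpoint-URL scheme used in resnest.py above, the sketch below rebuilds the download URL for one model name from the checksum table; the checksums are copied from the listing, and everything else is plain string formatting.
# Minimal sketch of the ResNeSt checkpoint URL construction shown above.
_url_format = 'https://s3.us-west-1.wasabisys.com/resnest/torch/{}-{}.pth'
_model_sha256 = {
    'resnest50': '528c19ca',
    'resnest101': '22405ba7',
    'resnest200': '75117900',
    'resnest269': '0cc87c48',
}
def short_hash(name):
    # keep only the first 8 characters of the stored checksum
    if name not in _model_sha256:
        raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
    return _model_sha256[name][:8]
url = _url_format.format('resnest50', short_hash('resnest50'))
print(url)  # ends with resnest50-528c19ca.pth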
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tosiyuki/LLaVA-JP
# Path: llava/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
PLAIN = auto()
TWO = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_images(self, return_pil=False):
def expand2square(pil_img, background_color=(122, 116, 104)):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: llava/model/llava_gpt2.py
class LlavaGpt2ForCausalLM(GPT2LMHeadModel, LlavaMetaForCausalLM):
config_class = LlavaConfig
base_model = "gpt2"
def __init__(self, config):
super(LlavaGpt2ForCausalLM, self).__init__(config)
self.model = LlavaGpt2Model(config)
#self.model = LlavaMetaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_model(self):
return self.model
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
images: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = None,
**kwargs
) -> Union[Tuple, CausalLMOutputWithPast]:
if inputs_embeds is None:
(
input_ids,
position_ids,
attention_mask,
past_key_values,
inputs_embeds,
labels
) = self.prepare_inputs_labels_for_multimodal(
input_ids,
position_ids,
attention_mask,
past_key_values,
labels,
images
)
return super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
images = kwargs.pop("images", None)
_inputs = super().prepare_inputs_for_generation(
input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
)
if images is not None:
_inputs['images'] = images
return _inputs
# Path: llava/model/llava_gpt_neox.py
class LlavaGptNeoxForCausalLM(PreTrainedModel, LlavaMetaForCausalLM):
config_class = LlavaConfig
base_model = "gpt_neox"
def __init__(self, config):
super(LlavaGptNeoxForCausalLM, self).__init__(config)
self.model = LlavaGptNeoxModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_model(self):
return self.model
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
images: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
if inputs_embeds is None:
(
input_ids,
position_ids,
attention_mask,
past_key_values,
inputs_embeds,
labels
) = self.prepare_inputs_labels_for_multimodal(
input_ids,
position_ids,
attention_mask,
past_key_values,
labels,
images
)
print(inputs_embeds.size())
return super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
images = kwargs.pop("images", None)
_inputs = super().prepare_inputs_for_generation(
input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
)
if images is not None:
_inputs['images'] = images
return _inputs
# Path: llava/model/llava_llama.py
class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
config_class = LlavaConfig
base_model = "llama"
def __init__(self, config):
super(LlavaLlamaForCausalLM, self).__init__(config)
self.model = LlavaLlamaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_model(self):
return self.model
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
images: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = None,
**kwargs
) -> Union[Tuple, CausalLMOutputWithPast]:
if inputs_embeds is None:
(
input_ids,
position_ids,
attention_mask,
past_key_values,
inputs_embeds,
labels
) = self.prepare_inputs_labels_for_multimodal(
input_ids,
position_ids,
attention_mask,
past_key_values,
labels,
images
)
return super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
images = kwargs.pop("images", None)
_inputs = super().prepare_inputs_for_generation(
input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs
)
if images is not None:
_inputs['images'] = images
return _inputs
# Path: llava/train/dataset.py
class LazySupervisedDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, data_path: str,
tokenizer: transformers.PreTrainedTokenizer,
data_args: DataArguments):
super(LazySupervisedDataset, self).__init__()
list_data_dict = json.load(open(data_path, "r"))
print("Formatting inputs...Skip in lazy mode")
self.tokenizer = tokenizer
self.list_data_dict = list_data_dict
self.data_args = data_args
def __len__(self):
return len(self.list_data_dict)
@property
def lengths(self):
length_list = []
for sample in self.list_data_dict:
img_tokens = 128 if 'image' in sample else 0
length_list.append(sum(len(conv['value'].split()) for conv in sample['conversations']) + img_tokens)
return length_list
@property
def modality_lengths(self):
length_list = []
for sample in self.list_data_dict:
cur_len = sum(len(conv['value'].split()) for conv in sample['conversations'])
cur_len = cur_len if 'images' in sample else -cur_len
length_list.append(cur_len)
return length_list
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
sources = self.list_data_dict[i]
if isinstance(i, int):
sources = [sources]
assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME
if 'image' in sources[0]:
image_file = self.list_data_dict[i]['image']
image_folder = self.data_args.image_folder
processor = self.data_args.image_processor
image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
if self.data_args.image_aspect_ratio == 'pad':
def expand2square(pil_img, background_color):
width, height = pil_img.size
if width == height:
return pil_img
elif width > height:
result = Image.new(pil_img.mode, (width, width), background_color)
result.paste(pil_img, (0, (width - height) // 2))
return result
else:
result = Image.new(pil_img.mode, (height, height), background_color)
result.paste(pil_img, ((height - width) // 2, 0))
return result
image = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
else:
image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
sources = preprocess_multimodal(
copy.deepcopy([e["conversations"] for e in sources]),
self.data_args
)
else:
sources = copy.deepcopy([e["conversations"] for e in sources])
data_dict = preprocess(
sources,
self.tokenizer,
has_image=('image' in self.list_data_dict[i]))
if isinstance(i, int):
data_dict = dict(input_ids=data_dict["input_ids"][0],
labels=data_dict["labels"][0])
# image exist in the data
if 'image' in self.list_data_dict[i]:
data_dict['images'] = image
elif self.data_args.is_multimodal:
# image does not exist in the data, but the model is multimodal
crop_size = self.data_args.image_processor.crop_size
data_dict['images'] = torch.zeros(3, crop_size['height'], crop_size['width'])
return data_dict
# Path: llava/train/dataset.py
class DataCollatorForSupervisedDataset(object):
"""Collate examples for supervised fine-tuning."""
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
input_ids, labels = tuple([instance[key] for instance in instances]
for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
input_ids = input_ids[:, :self.tokenizer.model_max_length]
labels = labels[:, :self.tokenizer.model_max_length]
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
)
if 'images' in instances[0]:
images = [instance['images'] for instance in instances]
if all(x is not None and x.shape == images[0].shape for x in images):
batch['images'] = torch.stack(images)
else:
batch['images'] = images
return batch
# Path: llava/train/arguments_dataclass.py
class ModelArguments:
base_model: Optional[str] = field(default="gpt2",
metadata={"help": "gpt2 or gpt_neox or llama"})
model_name_or_path: Optional[str] = field(default="rinna/japanese-gpt2-xsmall")
version: Optional[str] = field(default="plain")
    freeze_backbone: bool = field(default=False) # whether to freeze the LLM
    tune_mm_mlp_adapter: bool = field(default=False) # during pretraining, save only the mm_mlp_adapter
    vision_tower: Optional[str] = field(default="openai/clip-vit-large-patch14-336")
    mm_vision_select_layer: Optional[int] = field(default=-2) # default to the second-to-last layer
    pretrain_mm_mlp_adapter: Optional[str] = field(default=None) # set this when fine-tuning
    mm_projector_type: Optional[str] = field(default='mlp2x_gelu') # two-layer MLP projector
mm_vision_select_feature: Optional[str] = field(default="patch")
# Path: llava/train/arguments_dataclass.py
class DataArguments:
data_path: str = field(default="",
metadata={"help": "Path to the training data."})
lazy_preprocess: bool = False
is_multimodal: bool = False
image_folder: Optional[str] = field(default="/home/toshi/work/llava_jp/input/LLaVA-CC3M-Pretrain-595K/images",
metadata={"help": "Path to image data."})
image_aspect_ratio: str = 'square'
# Path: llava/train/arguments_dataclass.py
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
model_max_length: int = field(
default=1024,
metadata={
"help":
"Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
double_quant: bool = field(
default=True,
metadata={"help": "Compress the quantization statistics through double quantization."}
)
quant_type: str = field(
default="nf4",
metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."}
)
bits: int = field(
default=16,
metadata={"help": "How many bits to use."}
)
lora_enable: bool = False
lora_r: int = 64
lora_alpha: int = 16
lora_dropout: float = 0.05
lora_weight_path: str = ""
lora_bias: str = "none"
mm_projector_lr: Optional[float] = None
group_by_modality_length: bool = field(default=False) # dataset sampler option
fp16: bool = field(default=False)
bf16: bool = field(default=False)
output_dir: str = field(default="./output_llava/checkpoints/llava-v1.5-japanese-gpt2-xsmall")
num_train_epochs: int = field(default=1)
per_device_train_batch_size: int = field(default=32)
per_device_eval_batch_size: int = field(default=4)
gradient_accumulation_steps: int = field(default=1)
evaluation_strategy: str = field(default="no")
save_strategy: str = field(default="steps")
save_steps: int = field(default=24000)
save_total_limit: int = field(default=1)
learning_rate: float = field(default=1e-3)
weight_decay: float = field(default=0.)
warmup_ratio: float = field(default=0.03)
logging_steps: int = field(default=1)
model_max_length: int = field(default=1024)
gradient_checkpointing: bool = field(default=True)
dataloader_num_workers: int = field(default=16)
lr_scheduler_type: str = field(default="cosine")
seed: int = field(default=42)
# Path: llava/train/llava_trainer.py
class LLaVATrainer(Trainer):
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
if self.train_dataset is None or not has_length(self.train_dataset):
return None
if self.args.group_by_modality_length:
lengths = self.train_dataset.modality_lengths
return LengthGroupedSampler(
self.args.train_batch_size,
world_size=self.args.world_size * self.args.gradient_accumulation_steps,
lengths=lengths,
group_by_modality=True,
)
else:
return super()._get_train_sampler()
def create_optimizer(self):
"""
Setup the optimizer.
        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method.
"""
opt_model = self.model
if self.optimizer is None:
decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
if self.args.mm_projector_lr is not None:
projector_parameters = [name for name, _ in opt_model.named_parameters() if "mm_projector" in name]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in opt_model.named_parameters() if (n in decay_parameters and n not in projector_parameters and p.requires_grad)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n not in projector_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
{
"params": [
p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in projector_parameters and p.requires_grad)
],
"weight_decay": self.args.weight_decay,
"lr": self.args.mm_projector_lr,
},
{
"params": [
p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in projector_parameters and p.requires_grad)
],
"weight_decay": 0.0,
"lr": self.args.mm_projector_lr,
},
]
else:
optimizer_grouped_parameters = [
{
"params": [
p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if optimizer_cls.__name__ == "Adam8bit":
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
skipped = 0
for module in opt_model.modules():
if isinstance(module, nn.Embedding):
skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
logger.info(f"skipped {module}: {skipped/2**20}M params")
manager.register_module_override(module, "weight", {"optim_bits": 32})
logger.debug(f"bitsandbytes: will optimize {module} in fp32")
logger.info(f"skipped: {skipped/2**20}M params")
return self.optimizer
def _save_checkpoint(self, model, trial, metrics=None):
if getattr(self.args, 'tune_mm_mlp_adapter', False):
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
run_dir = self._get_output_dir(trial=trial)
output_dir = os.path.join(run_dir, checkpoint_folder)
# Only save Adapter
#keys_to_match = ['mm_projector', 'vision_resampler']
keys_to_match = ['mm_projector']
weight_to_save = get_mm_adapter_state(self.model.named_parameters(), keys_to_match)
#weight_to_save = self.model.named_parameters().detach().cpu().clone()
if self.args.local_rank == 0 or self.args.local_rank == -1:
self.model.config.save_pretrained(output_dir)
torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))
else:
super(LLaVATrainer, self)._save_checkpoint(model, trial, metrics)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
if getattr(self.args, 'tune_mm_mlp_adapter', False):
pass
else:
super(LLaVATrainer, self)._save(output_dir, state_dict)
# Path: train_llava.py
import os
import pathlib
import torch
import transformers
from typing import Dict
from llava import conversation as conversation_lib
from llava.model.llava_gpt2 import LlavaGpt2ForCausalLM
from llava.model.llava_gpt_neox import LlavaGptNeoxForCausalLM
from llava.model.llava_llama import LlavaLlamaForCausalLM
from llava.train.dataset import LazySupervisedDataset, DataCollatorForSupervisedDataset
from llava.train.arguments_dataclass import ModelArguments, DataArguments, TrainingArguments
from llava.train.llava_trainer import LLaVATrainer
from transformers import BitsAndBytesConfig
from peft import prepare_model_for_kbit_training
from peft import LoraConfig, get_peft_model
from peft.tuners.lora import LoraLayer
def rank0_print(*args):
if local_rank == 0:
print(*args)
# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state(named_params, bias):
if bias == "none":
to_return = {k: t for k, t in named_params if "lora_" in k}
elif bias == "all":
to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
elif bias == "lora_only":
to_return = {}
maybe_lora_bias = {}
lora_bias_names = set()
for k, t in named_params:
if "lora_" in k:
to_return[k] = t
bias_name = k.split("lora_")[0] + "bias"
lora_bias_names.add(bias_name)
elif "bias" in k:
maybe_lora_bias[k] = t
for k, t in maybe_lora_bias:
if bias_name in lora_bias_names:
to_return[bias_name] = t
else:
raise NotImplementedError
to_return = {k: v.detach().cpu().clone() for k, v in to_return.items()}
return to_return
def get_peft_state_non_lora(named_params, require_grad_only=True):
to_return = {k: t for k, t in named_params if "lora_" not in k}
if require_grad_only:
| to_return = {k: t for k, t in to_return.items() if t.requires_grad} |
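To make the filtering in `get_peft_state_non_lora` concrete, here is a small, self-contained sketch that applies the same two filters (drop `lora_` parameters, keep only trainable ones) to a toy parameter list; the parameter names are invented for illustration.
import torch
def get_peft_state_non_lora_sketch(named_params, require_grad_only=True):
    # same filtering as the helper above: drop LoRA weights, keep trainable params
    to_return = {k: t for k, t in named_params if "lora_" not in k}
    if require_grad_only:
        to_return = {k: t for k, t in to_return.items() if t.requires_grad}
    return {k: v.detach().cpu().clone() for k, v in to_return.items()}
# Hypothetical parameter list, for illustration only.
params = [
    ("mm_projector.weight", torch.randn(4, 4, requires_grad=True)),
    ("base.lora_A.weight", torch.randn(4, 4, requires_grad=True)),
    ("base.frozen.weight", torch.randn(4, 4, requires_grad=False)),
]
print(sorted(get_peft_state_non_lora_sketch(params)))  # ['mm_projector.weight']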
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jags111/ComfyUI_Jags_Audiotools
# Path: libs/dance_diffusion/base/type.py
class ModelType(str, enum.Enum):
DD = 'DD'
# Path: libs/dance_diffusion/base/model.py
class ModelWrapperBase():
def __init__(self):
#self.uuid: str = None
#self.name: str = None
self.path: str = None
self.device_accelerator: torch.device = None
self.chunk_size: int = None
self.sample_rate: int = None
def load(
self,
path: str,
device_accelerator: torch.device,
optimize_memory_use:bool=False,
chunk_size: int=131072,
sample_rate: int=48000
):
raise NotImplementedError
# Path: libs/dance_diffusion/base/inference.py
class InferenceBase():
def __init__(
self,
device_accelerator: torch.device,
device_offload: torch.device,
optimize_memory_use: bool,
use_autocast: bool,
model: ModelWrapperBase
):
self.device_accelerator = device_accelerator
self.device_offload = device_offload if(optimize_memory_use==True) else None
self.optimize_memory_use = optimize_memory_use
self.use_autocast = use_autocast
self.model = model
self.generator = torch.Generator(device_accelerator)# if (device_accelerator.type != 'mps') else torch.device('cpu'))
self.rng_state = None
def set_device_accelerator(
self,
device: torch.device = None
):
self.device_accelerator = device
def get_device_accelerator(
self
) -> torch.device:
return self.device_accelerator
def set_model(
self,
model: ModelWrapperBase = None
):
self.model = model
def get_model(
self
) -> ModelWrapperBase:
return self.model
def expand(
self,
tensor: torch.Tensor,
expansion_map: list[int]
) -> torch.Tensor:
out = torch.empty([0], device=self.device_accelerator)
for i in range(tensor.shape[0]):
out = torch.cat([out, tensor[i,:,:].expand(expansion_map[i], -1, -1)], 0)
return out
# def cc_randn(self, shape:tuple, seed:int, device:torch.device, dtype = None, rng_state_in:torch.Tensor = None):
# initial_rng_state = self.generator.get_state()
# rng_state_out = torch.empty([shape[0], shape[1]], dtype=torch.ByteTensor,device=self.generator.device)
# rn = torch.empty(shape,device=device, dtype=dtype, device=device)
# for sample in range(shape[0]):
# for channel in range(shape[1]):
# self.generator.manual_seed(seed + sample * shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])
# rn[sample, channel] = torch.randn([shape[2]], generator=self.generator, dtype=dtype, device=device)
# rng_state_out[sample, channel] = self.generator.get_state()
# self.rng_state = rng_state_out
# self.generator.set_state(initial_rng_state)
# return rn
# def cc_randn_like(self, input:torch.Tensor, seed:int, rng_state_in:torch.Tensor = None) -> Tuple[torch.Tensor, torch.Tensor]:
# initial_rng_state = self.generator.get_state()
# rng_state_out = torch.empty([input.shape[0], input.shape[1]], dtype=torch.ByteTensor,device=self.generator.device)
# rn = torch.empty_like(input)
# for sample in range(input.shape[0]):
# for channel in range(input.shape[1]):
# self.generator.manual_seed(seed + sample * input.shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])
# rn[sample, channel] = torch.randn([input.shape[2]], generator=self.generator, dtype=input.dtype, device=input.device)
# rng_state_out[sample, channel] = self.generator.get_state()
# self.rng_state = rng_state_out
# self.generator.set_state(initial_rng_state)
# return rn
def autocast_context(self):
if self.device_accelerator.type == 'cuda':
return torch.cuda.amp.autocast()
elif self.device_accelerator.type == 'cpu':
return torch.cpu.amp.autocast()
elif self.device_accelerator.type == 'mps':
return nullcontext()
else:
return torch.autocast(self.device_accelerator.type, dtype=torch.float32)
@contextlib.contextmanager
def offload_context(self, model):
"""
Used by inference implementations, this context manager moves the
passed model to the inference's `device_accelerator` device on enter,
and then returns it to the `device_offload` device on exit.
It also wraps the `inference.autocast_context()` context.
"""
autocast = self.autocast_context() if self.use_autocast else nullcontext()
with autocast:
if self.optimize_memory_use:
model.to(self.device_accelerator)
yield None
if self.optimize_memory_use:
model.to(self.device_offload)
# Path: libs/dance_diffusion/dd/model.py
class DDModelWrapper(ModelWrapperBase):
def __init__(self):
super().__init__()
self.module:DanceDiffusionInference = None
self.model:Callable = None
def load(
self,
path:str,
device_accelerator:torch.device,
optimize_memory_use:bool=False,
chunk_size:int=None,
sample_rate:int=None
):
default_model_config = dict(
version = [0, 0, 1],
model_info = dict(
name = 'Dance Diffusion Model',
description = 'v1.0',
type = ModelType.DD,
native_chunk_size = 65536,
sample_rate = 48000,
),
diffusion_config = dict(
n_attn_layers = 4
)
)
file = torch.load(path, map_location='cpu')
model_config = file.get('model_config')
if not model_config:
print(f"Model file {path} is invalid. Please run the conversion script.")
print(f" - Default model config will be used, which may be inaccurate.")
model_config = default_model_config
model_info = model_config.get('model_info')
diffusion_config = model_config.get('diffusion_config')
self.path = path
self.chunk_size = model_info.get('native_chunk_size')if not chunk_size else chunk_size
self.sample_rate = model_info.get('sample_rate')if not sample_rate else sample_rate
self.module = DanceDiffusionInference(
n_attn_layers=diffusion_config.get('n_attn_layers'),
sample_size=chunk_size,
sample_rate=sample_rate,
latent_dim=0,
)
self.module.load_state_dict(
file["state_dict"],
strict=False
)
self.module.eval().requires_grad_(False)
self.model = self.module.diffusion_ema if (optimize_memory_use) else self.module.diffusion_ema.to(device_accelerator)
# Path: libs/dance_diffusion/dd/inference.py
class DDInference(InferenceBase):
def __init__(
self,
device_accelerator: torch.device = None,
device_offload: torch.device = None,
optimize_memory_use: bool = False,
use_autocast: bool = True,
model: ModelWrapperBase = None
):
super().__init__(device_accelerator, device_offload, optimize_memory_use, use_autocast, model)
def generate(
self,
callback: Callable = None,
batch_size: int = None,
seed: int = None,
steps: int = None,
scheduler: SchedulerType = None,
scheduler_args: dict = None,
sampler: SamplerType = None,
sampler_args: dict = None,
**kwargs
):
self.generator.manual_seed(seed)
step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)#step_list = step_list[:-1] if sampler in [SamplerType.V_PRK, SamplerType.V_PLMS, SamplerType.V_PIE, SamplerType.V_PLMS2, SamplerType.V_IPLMS] else step_list
if SamplerType.is_v_sampler(sampler):
x_T = torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)
model = self.model.model
else:
x_T = step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)
model = VDenoiser(self.model.model)
with self.offload_context(self.model.model):
return sampler.sample(
model,
x_T,
step_list,
callback,
**sampler_args
).float()
def generate_variation(
self,
callback: Callable = None,
batch_size: int = None,
seed: int = None,
audio_source: torch.Tensor = None,
expansion_map: list[int] = None,
noise_level: float = None,
steps: int = None,
scheduler: SchedulerType = None,
scheduler_args = None,
sampler: SamplerType = None,
sampler_args = None,
**kwargs
) -> torch.Tensor:
self.generator.manual_seed(seed)
audio_source = self.expand(audio_source, expansion_map)
if SamplerType.is_v_sampler(sampler):
step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)
step_list = step_list[step_list < noise_level]
alpha_T, sigma_T = t_to_alpha_sigma(step_list[0])
x_T = alpha_T * audio_source + sigma_T * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator)
model = self.model.model
else:
scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level)
step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)
x_T = audio_source + step_list[0] * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator)
model = VDenoiser(self.model.model)
with self.offload_context(self.model.model):
return sampler.sample(
model,
x_T,
step_list,
callback,
**sampler_args
).float()
def generate_interpolation(
self,
callback: Callable = None,
batch_size: int = None,
# seed: int = None,
interpolation_positions: list[float] = None,
audio_source: torch.Tensor = None,
audio_target: torch.Tensor = None,
expansion_map: list[int] = None,
noise_level: float = None,
steps: int = None,
scheduler: SchedulerType = None,
scheduler_args = None,
sampler: SamplerType = None,
sampler_args = None,
**kwargs
) -> torch.Tensor:
if SamplerType.is_v_sampler(sampler):
step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)
step_list = step_list[step_list < noise_level]
step_list[-1] += 1e-7 #HACK avoid division by 0 in reverse sampling
model = self.model.model
else:
scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level)
step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)
step_list = step_list[:-1] #HACK avoid division by 0 in reverse sampling
model = VDenoiser(self.model.model)
if self.optimize_memory_use and batch_size < 2:
x_0_source = audio_source
x_0_target = audio_target
with self.offload_context(self.model.model):
x_T_source = sampler.sample(
model,
x_0_source,
step_list.flip(0),
callback,
**sampler_args
)
with self.offload_context(self.model.model):
x_T_target = sampler.sample(
model,
x_0_target,
step_list.flip(0),
callback,
**sampler_args
)
x_T = torch.cat([x_T_source, x_T_target], dim=0)
else:
x_0 = torch.cat([audio_source, audio_target], dim=0)
with self.offload_context(self.model.model):
x_T = sampler.sample(
model,
x_0,
step_list.flip(0),
callback,
**sampler_args
)
if SamplerType.is_v_sampler(sampler): #HACK reset schedule after hack
step_list[-1] = 0.0
else:
step_list = torch.cat([step_list, step_list.new_zeros([1])])
x_Int = torch.empty([batch_size, 2, self.model.chunk_size], device=self.device_accelerator)
for pos in range(len(interpolation_positions)):
x_Int[pos] = tensor_slerp_2D(x_T[0], x_T[1], interpolation_positions[pos])
with self.offload_context(self.model.model):
return sampler.sample(
model,
x_Int,
step_list,
callback,
**sampler_args
).float()
def generate_inpainting(
self,
callback: Callable = None,
batch_size: int = None,
seed: int = None,
audio_source: torch.Tensor = None,
expansion_map: list[int] = None,
mask: torch.Tensor = None,
steps: int = None,
scheduler: SchedulerType = None,
scheduler_args = None,
sampler: SamplerType = None,
sampler_args = None,
inpainting_args = None,
**kwargs
) -> torch.Tensor:
self.generator.manual_seed(seed)
method = inpainting_args.get('method')
if(method == 'repaint'):
raise Exception("Repaint currently not supported due to changed requirements")
elif(method == 'posterior_guidance'):
step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)
if SamplerType.is_v_sampler(sampler):
raise Exception('V-Sampler currently not supported for posterior guidance. Please choose a K-Sampler.')
else:
x_T = audio_source + step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)
model = PosteriorSampling(
VDenoiser(self.model.model),
x_T,
audio_source,
mask,
inpainting_args.get('posterior_guidance_scale')
)
with self.offload_context(self.model.model):
return sampler.sample(
model,
x_T,
step_list,
callback,
**sampler_args
).float()
def generate_extension(
self,
callback: Callable = None,
batch_size: int = None,
seed: int = None,
audio_source: torch.Tensor = None,
expansion_map: list[int] = None,
steps: int = None,
scheduler: SchedulerType = None,
scheduler_args = None,
sampler: SamplerType = None,
sampler_args = None,
inpainting_args = None,
keep_start: bool = None,
**kwargs
) -> torch.Tensor:
half_chunk_size = self.model.chunk_size // 2
chunk = torch.cat([audio_source[:, :, -half_chunk_size:], torch.zeros([batch_size, 2, half_chunk_size], device=self.device_accelerator)], dim=2)
#chunk = audio_source
mask = torch.cat(
[torch.ones([batch_size, 2, half_chunk_size], dtype=torch.bool, device=self.device_accelerator),
torch.zeros([batch_size, 2, half_chunk_size], dtype=torch.bool, device=self.device_accelerator)],
dim=2
)
output = self.generate_inpainting(
callback,
batch_size,
seed,
chunk,
expansion_map,
mask,
steps,
scheduler,
scheduler_args,
sampler,
sampler_args,
inpainting_args
)
if (keep_start):
return torch.cat(
[audio_source,
output[:, :, -half_chunk_size:]],
dim=2
)
else:
return output[:, :, -half_chunk_size:]
# Path: libs/dance_diffusion/api.py
import torch
import enum
from dataclasses import dataclass
from typing import Callable
from .base.type import ModelType
from .base.model import ModelWrapperBase
from .base.inference import InferenceBase
from .dd.model import DDModelWrapper
from .dd.inference import DDInference
class RequestType(str, enum.Enum):
Generation = 'Generation'
Variation = 'Variation'
Interpolation = 'Interpolation'
Inpainting = 'Inpainting'
Extension = 'Extension'
class Request:
def __init__(
self,
request_type: RequestType,
model_path: str,
model_type: ModelType,
model_chunk_size: int,
model_sample_rate: int,
**kwargs
):
self.request_type = request_type
self.model_path = model_path
self.model_type = model_type
self.model_chunk_size = model_chunk_size
self.model_sample_rate = model_sample_rate
self.kwargs = kwargs
class Response:
def __init__(
self,
result: torch.Tensor
):
self.result = result
class RequestHandler:
def __init__(
self,
device_accelerator: torch.device,
device_offload: torch.device = None,
optimize_memory_use: bool = False,
use_autocast: bool = True
):
self.device_accelerator = device_accelerator
self.device_offload = device_offload
self.model_wrapper: ModelWrapperBase = None
self.inference: InferenceBase = None
self.optimize_memory_use = optimize_memory_use
self.use_autocast = use_autocast
def process_request(
self,
request: Request,
callback: Callable = None
) -> Response:
# load the model from the request if it's not already loaded
if (self.model_wrapper == None):
self.load_model(
request.model_type,
request.model_path,
request.model_chunk_size,
request.model_sample_rate
)
elif (request.model_path != self.model_wrapper.path):
del self.model_wrapper, self.inference
self.load_model(
request.model_type,
request.model_path,
request.model_chunk_size,
| request.model_sample_rate |
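For orientation, here is a minimal sketch of how a `Request` like the one being loaded above is assembled; it mirrors the constructor from api.py, while the model path and the sampler settings passed through **kwargs are placeholders, not values from the repository.
import enum
class RequestType(str, enum.Enum):
    Generation = 'Generation'
class Request:
    # mirrors the constructor shown above; sampler settings ride along in **kwargs
    def __init__(self, request_type, model_path, model_type,
                 model_chunk_size, model_sample_rate, **kwargs):
        self.request_type = request_type
        self.model_path = model_path
        self.model_type = model_type
        self.model_chunk_size = model_chunk_size
        self.model_sample_rate = model_sample_rate
        self.kwargs = kwargs
# Placeholder path and settings, for illustration only.
req = Request(
    RequestType.Generation,
    model_path='models/dd/example_model.ckpt',
    model_type='DD',
    model_chunk_size=65536,
    model_sample_rate=48000,
    batch_size=1,
    seed=1234,
    steps=50,
)
print(req.request_type.value, req.kwargs['steps'])  # Generation 50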
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zaigie/stream-infer
# Path: stream_infer/inference.py
class Inference:
def __init__(self, dispatcher: Dispatcher):
self.dispatcher = dispatcher
self.inferences_info = []
self.timers = {}
self.is_stop = False
self.process_func = self.default_process
def load_algo(
self,
algo_instance: BaseAlgo,
frame_count: int,
frame_step: int,
interval: Union[int, float],
):
if not isinstance(algo_instance, BaseAlgo):
err = f"Algo instance must be an instance of `BaseAlgo`, but got {type(algo_instance)}"
logger.error(err)
raise ValueError(err)
self.inferences_info.append((algo_instance, frame_count, frame_step, interval))
self.timers[algo_instance.name] = Timer(interval, key=algo_instance.name)
algo_instance.init()
def list_algos(self):
result = []
for algo_instance, _, _, _ in self.inferences_info:
result.append(algo_instance.name)
return result
def run(self):
for inference_info in self.inferences_info:
algo_instance, _, _, _ = inference_info
timer = self.timers[algo_instance.name]
if timer.is_time():
self._infer(inference_info)
def run_loop(self):
while not self.is_stop:
self.run()
def run_async(self):
thread = th.Thread(target=self.run_loop)
thread.start()
return thread
def stop(self):
self.is_stop = True
def auto_run_specific(self, fps: int, current_frame_index: int) -> List[str]:
current_algo_names = []
for algo_instance, _, _, frequency in self.inferences_info:
if current_frame_index % int(frequency * fps) == 0:
self.run_specific(algo_instance.name)
current_algo_names.append(algo_instance.name)
return current_algo_names
def run_specific(self, algo_name: str):
for inference_info in self.inferences_info:
algo_instance, _, _, _ = inference_info
if algo_instance.name == algo_name:
self._infer(inference_info)
def _infer(self, inference_info):
algo_instance, frame_count, frame_step, _ = inference_info
frames = self.dispatcher.get_frames(frame_count, frame_step)
if not frames:
return -1
result = algo_instance.run(frames)
self.dispatcher.collect(
self.dispatcher.get_current_position(), algo_instance.name, result
)
def default_process(self, *args, **kwargs):
pass
def process(self, func):
def wrapper(*args, **kwargs):
return func(self, *args, **kwargs)
self.process_func = wrapper
def start(
self,
player: Player,
fps: int = 30,
position: int = 0,
mode: Literal["realtime", "offline"] = "realtime",
recording_path: str = None,
):
if mode in [Mode.OFFLINE, Mode.OFFLINE.value]:
recorder = Recorder(player, recording_path) if recording_path else None
for frame, current_frame_index in player.play(fps, position):
current_algo_names = self.auto_run_specific(fps, current_frame_index)
processed_frame = self.process_func(
frame=frame, current_algo_names=current_algo_names
)
frame = processed_frame if processed_frame is not None else frame
if recorder:
recorder.add_frame(frame)
if recorder:
recorder.close()
elif mode in [Mode.REALTIME, Mode.REALTIME.value]:
player.play_async(fps)
self.run_async()
while player.is_active():
self.process_func()
self.stop()
player.stop()
else:
err = f"Unsupported mode: {mode}, only support `realtime` or `offline`"
logger.error(err)
raise ValueError(err)
self.dispatcher.clear()
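The `auto_run_specific` method above picks which algorithms to run on a frame purely from `current_frame_index % int(interval * fps)`. Below is a standalone sketch of just that scheduling rule so the cadence is easy to see; the algorithm names and intervals are invented examples.
# Standalone sketch of the frame-index scheduling rule used by auto_run_specific.
fps = 30
algos = {"head_detect": 1, "scene_classify": 2.5}  # name -> interval in seconds (assumed)
def due_algos(current_frame_index):
    # an algo is due whenever the frame index is a multiple of interval * fps
    return [name for name, interval in algos.items()
            if current_frame_index % int(interval * fps) == 0]
for idx in range(0, 151, 15):
    print(idx, due_algos(idx))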
# Path: stream_infer/player.py
class Player:
def __init__(
self,
dispatcher: Union[Dispatcher, BaseProxy],
producer: Union[OpenCVProducer, PyAVProducer],
source: Union[str, int],
show_progress: bool = True,
):
self.dispatcher = dispatcher
self.producer = producer
self.source = source
self.show_progress = show_progress
try:
self.info = self.producer.get_info(self.source)
except Exception as e:
raise ValueError(f"Error getting info: {e}")
self.fps = self.info["fps"]
self.play_fps = self.fps
self.frame_count = self.info["frame_count"]
self.process = None
self.is_end = mp.Value("b", False)
def play(self, fps=None, position=0):
fps = self.fps if fps is None else fps
self.play_fps = fps
interval_count = 0
if self.show_progress:
pbar = tqdm(
total=self.info["total_seconds"],
desc="Video Time",
leave=True,
unit="sec",
)
if position > 0:
self.dispatcher.set_current_position(position)
self.dispatcher.set_current_frame_index(fps * position)
if self.show_progress:
pbar.update(fps * position)
for frame in self.producer.read(self.source, fps, position):
self.dispatcher.add_frame(frame)
interval_count += 1
if interval_count >= fps:
interval_count = 0
self.dispatcher.increase_current_position()
if self.show_progress:
pbar.update(1)
else:
logger.debug(
f"{self.get_play_time()}/{position2time(self.info['total_seconds'])}"
)
yield frame, self.dispatcher.get_current_frame_index()
if self.show_progress:
pbar.close()
def play_async(self, fps=None):
"""
Starts the appropriate streaming process based on the frame count.
"""
if not isinstance(self.dispatcher, BaseProxy):
logger.error(
f"Dispatcher is not an proxy: {type(self.dispatcher)}, use create(mode='realtime') to create"
)
raise ValueError(
f"Dispatcher is not an proxy: {type(self.dispatcher)}, use create(mode='realtime') to create"
)
if fps is None or fps >= self.fps:
fps = self.fps
if fps > 30:
logger.warning(
f"FPS {fps} is too high, if your player is playing more slowly than the actual time, set a lower play fps"
)
self.play_fps = fps
if self.frame_count <= 0:
target = self.normal_stream
else:
target = self.video_stream
self.process = mp.Process(target=target)
self.process.start()
return self.process
def stop(self):
if self.process:
self.is_end.value = True
self.process.terminate()
def is_active(self) -> bool:
"""
Checks if the streaming process is still running.
"""
return (
self.process.is_alive() and not self.is_end.value if self.process else False
)
def get_play_time(self) -> str:
return position2time(self.dispatcher.get_current_position())
def video_stream(self):
"""
Handles streaming for video files. Frames are processed at a rate determined by the video's FPS.
"""
base_interval = 1 / self.play_fps
start_time = time.time()
interval_count = 0
if self.show_progress:
pbar = tqdm(
total=self.info["total_seconds"],
desc="Streaming Video Time",
leave=True,
unit="sec",
)
for idx, frame in enumerate(self.producer.read(self.source, self.play_fps)):
target_time = start_time + (idx * base_interval)
time.sleep(max(0, target_time - time.time()))
self.dispatcher.add_frame(frame)
interval_count += 1
if interval_count >= self.play_fps:
interval_count = 0
self.dispatcher.increase_current_position()
if self.show_progress:
pbar.update(1)
else:
logger.debug(
f"{self.get_play_time()}/{position2time(self.info['total_seconds'])}"
)
if self.show_progress:
pbar.close()
self.is_end.value = True
def normal_stream(self):
"""
Handles streaming for non-video files. Frames are processed at regular intervals.
"""
for frame in self.producer.read(self.source, self.play_fps):
if self.dispatcher.get_current_frame_index() % self.play_fps == 0:
self.dispatcher.increase_current_position()
logger.debug(f"{self.get_play_time()}")
self.dispatcher.add_frame(frame)
self.is_end.value = True
# Path: stream_infer/producer/_opencv.py
class OpenCVProducer:
def __init__(self, width: int, height: int, cvt_code=None):
self.width = width
self.height = height
self.cvt_code = cvt_code
def read(self, source, fps=None, position=0):
"""
Reads frames from a video file/stream_url/v4l2 device.
Optionally skips frames to meet the specified fps.
Args:
source (str): The path to the video file/stream_url/v4l2 device.
fps (int, optional): Target frames per second. If None, no frame skipping is done.
position (int, optional): The position in seconds from where to start reading the video.
Yields:
numpy.ndarray: frame
"""
cap = cv2.VideoCapture(source)
if not cap.isOpened():
raise ValueError(f"Failed to open {source}")
original_fps = cap.get(cv2.CAP_PROP_FPS)
# Skip to the requested second
if position > 0:
cap.set(cv2.CAP_PROP_POS_MSEC, position * 1000) # position in milliseconds
frame_interval = 1.0
if fps is not None and original_fps > fps:
frame_interval = original_fps / fps
frame_index = int(original_fps * position)
next_frame_to_process = frame_index
while True:
ret, frame = cap.read()
if not ret:
break
if frame_index >= next_frame_to_process:
try:
height, width, _ = frame.shape
if width != self.width or height != self.height:
frame = cv2.resize(frame, (self.width, self.height))
if self.cvt_code is not None:
frame = cv2.cvtColor(frame, self.cvt_code)
yield frame
next_frame_to_process += frame_interval
except Exception as e:
logger.error(f"Error processing frame: {e}")
raise e
frame_index += 1
cap.release()
def get_info(self, source):
"""
Extracts video properties.
Args:
source (str): The path to the video file/stream_url/v4l2 device.
Returns:
dict: Video properties including width, height, fps, and frame count.
"""
cap = cv2.VideoCapture(source)
if not cap.isOpened():
raise ValueError(f"Failed to open {source}")
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
fps = cap.get(cv2.CAP_PROP_FPS)
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
total_seconds = int(frame_count / fps)
cap.release()
return {
"width": width,
"height": height,
"fps": fps,
"frame_count": frame_count,
"total_seconds": total_seconds,
}
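# A hedged usage sketch of the OpenCVProducer defined above: get_info() reports the source
# properties and read() yields resized frames, skipping decoded frames so the effective rate
# matches the requested fps. The file name "example.mp4" is an assumption for illustration.
def opencv_producer_demo():
    producer = OpenCVProducer(width=1920, height=1080)
    print(producer.get_info("example.mp4"))  # width/height/fps/frame_count/total_seconds
    for i, frame in enumerate(producer.read("example.mp4", fps=10, position=0)):
        print(i, frame.shape)  # (1080, 1920, 3)
        if i >= 9:
            break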
# Path: stream_infer/producer/_pyav.py
class PyAVProducer:
def __init__(self, width: int, height: int, format=None):
self.width = width
self.height = height
self.format = "bgr24" if format is None else format
def read(self, source, fps=None, position=0):
"""
Reads frames from a video file/stream_url/v4l2 device.
Optionally skips frames to meet the specified fps.
Args:
source (str): The path to the video file/stream_url/v4l2 device.
fps (int, optional): Target frames per second. If None, no frame skipping is done.
position (int, optional): The position in seconds from where to start reading the video.
Yields:
numpy.ndarray: frame
"""
try:
container = av.open(source)
video_stream = next(s for s in container.streams if s.type == "video")
original_fps = video_stream.base_rate
# Seek to the specified position
if position > 0:
logger.warning(
"Using PyAVProducer and specifying position is not recommended because there is not yet a good solution to the problem of startup delays but it still works"
)
start_frame = int(position * original_fps)
frame_interval = 1.0
if fps is not None and original_fps > fps:
frame_interval = original_fps / fps
frame_index = 0
next_frame_to_process = start_frame if position > 0 else frame_index
for frame in container.decode(video=0):
if frame_index >= next_frame_to_process:
try:
frame = frame.to_ndarray(format=self.format)
height, width, _ = frame.shape
if width != self.width or height != self.height:
frame = cv2.resize(frame, (self.width, self.height))
yield frame
next_frame_to_process += frame_interval
except Exception as e:
logger.error(f"Error processing frame: {e}")
raise e
frame_index += 1
except av.AVError as e:
logger.error(f"Failed to open {source}: {e}")
raise ValueError(f"Failed to open {source}: {e}")
container.close()
def get_info(self, source):
"""
Extracts video properties.
Args:
source (str): The path to the video file/stream_url/v4l2 device.
Returns:
dict: Video properties including width, height, fps, and frame count.
"""
try:
container = av.open(source)
video_stream = next(s for s in container.streams if s.type == "video")
width = video_stream.width
height = video_stream.height
fps = video_stream.base_rate # or video_stream.average_rate
if hasattr(video_stream, "frames"):
frame_count = video_stream.frames
else:
frame_count = 0
total_seconds = int(frame_count / fps)
return {
"width": width,
"height": height,
"fps": fps,
"frame_count": frame_count,
"total_seconds": total_seconds,
}
except av.AVError as e:
raise ValueError(f"Failed to open {source}: {e}")
# Path: stream_infer/model.py
class Mode(Enum):
REALTIME = "realtime"
OFFLINE = "offline"
# Path: stream_infer/model.py
class ProducerType(Enum):
OPENCV = "opencv"
PYAV = "pyav"
# Path: stream_infer/log.py
def get_logger():
# Path: stream_infer/dynamic.py
import os
import sys
import importlib
from pydantic import BaseModel, ValidationError
from typing import Union
from .inference import Inference
from .player import Player
from .producer import OpenCVProducer, PyAVProducer
from .model import Mode, ProducerType
from .log import logger
class ProducerData(BaseModel):
type: ProducerType
width: int
height: int
class DynamicImport(BaseModel):
module: str
name: str
args: Union[tuple, None] = ()
kwargs: Union[dict, None] = {}
class AlgoKwArgs(BaseModel):
frame_count: int
frame_step: int
interval: int
class Algos(DynamicImport):
kwargs: AlgoKwArgs
class DynamicConfig(BaseModel):
mode: Mode
source: str
fps: int
dispatcher: DynamicImport
algos: list[Algos]
producer: ProducerData
process: Union[DynamicImport, None] = None
recording_path: Union[str, None] = None
class DynamicApp:
def __init__(self, config: DynamicConfig) -> None:
try:
config = DynamicConfig(**config)
except ValidationError as e:
err = f"Invalid config: {e}"
logger.error(err)
raise e
self.config = config
dispatcher_module = self.dynamic_import(config.dispatcher.module)
dispatcher_cls = getattr(dispatcher_module, config.dispatcher.name)
self.dispatcher = dispatcher_cls.create(
mode=config.mode, *config.dispatcher.args, **config.dispatcher.kwargs
)
self.inference = Inference(self.dispatcher)
if config.process is not None:
process_module = self.dynamic_import(config.process.module)
self.inference.process(getattr(process_module, config.process.name))
def start(self):
if self.config.producer.type in [
ProducerType.OPENCV,
ProducerType.OPENCV.value,
]:
producer = OpenCVProducer(
self.config.producer.width, self.config.producer.height
)
elif self.config.producer.type in [ProducerType.PYAV, ProducerType.PYAV.value]:
producer = PyAVProducer(
self.config.producer.width, self.config.producer.height
)
else:
raise ValueError(
f"Unknown producer: {producer}, must be 'opencv' or 'pyav'"
)
for algo in self.config.algos:
module = self.dynamic_import(algo.module)
algo_class = getattr(module, algo.name)
self.inference.load_algo(algo_class(), **algo.kwargs.dict())
self.inference.start(
Player(
self.dispatcher,
producer,
source=self.config.source,
show_progress=False,
),
| fps=self.config.fps, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DongGeun-Yoon/DGDM
# Path: Register.py
class Registers:
def __init__(self):
raise RuntimeError("Registries is not intended to be instantiated")
datasets = Register('datasets')
runners = Register('runners')
# Path: runners/BBDMRunner.py
class BBDMRunner(BaseRunner):
def __init__(self, config):
super().__init__(config)
def initialize_model(self, config):
if config.model.model_type == "BBDM":
bbdmnet = BrownianBridgeModel(config)
else:
raise NotImplementedError
# initialize model
try:
bbdmnet.apply(weights_init)
        except Exception:
pass
return bbdmnet
def load_model_from_checkpoint(self):
states = super().load_model_from_checkpoint()
def print_model_summary(self, net):
def get_parameter_number(model):
total_num = sum(p.numel() for p in model.parameters())
trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return total_num, trainable_num
total_num, trainable_num = get_parameter_number(net)
print("Total Number of parameter: %.2fM" % (total_num / 1e6))
print("Trainable Number of parameter: %.2fM" % (trainable_num / 1e6))
def initialize_optimizer_scheduler(self, net, config):
# diffusion model weight
learning_params = [{'params':net.denoise_fn.parameters(), 'lr':config.model.BB.optimizer.lr}]
# condition model weight
if config.model.CondParams.train or config.model.CondParams.pretrained is None:
learning_params.append({'params':net.cond_stage_model.parameters(), 'lr':config.model.CondParams.lr})
optimizer = torch.optim.Adam(learning_params,
weight_decay=config.model.BB.optimizer.weight_decay,
betas=(config.model.BB.optimizer.beta1, config.model.BB.optimizer.beta2)
)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode='min',
verbose=True,
threshold_mode='rel',
**vars(config.model.BB.lr_scheduler)
)
return [optimizer], [scheduler]
@torch.no_grad()
def get_checkpoint_states(self, stage='epoch_end'):
model_states, optimizer_scheduler_states = super().get_checkpoint_states()
return model_states, optimizer_scheduler_states
def loss_fn(self, net, batch, epoch, step, opt_idx=0, stage='train', write=True):
x, x_cond = batch
loss, additional_info, cond = net(x, x_cond)
if write:
self.writer.add_scalar(f'loss/{stage}', loss, step)
self.writer.add_scalar(f'loss/cond', cond, step)
loss = loss + cond
return loss
@torch.no_grad()
def sample(self, net, batch, sample_path, stage='train', write=True):
sample_path = make_dir(os.path.join(sample_path, f'{stage}_sample'))
x, x_cond = batch
# batch_size = x.shape[0] if x.shape[0] < 4 else 4
batch_size = 1
x = x[0:1]
x_cond = x_cond[0:1]
grid_size = max(x.size(1), x_cond.size(1))
# save images
sample = net.sample(x_cond, clip_denoised=self.config.testing.clip_denoised)
sample, prediction = sample[0], sample[1]
channels = ['ir105', 'sw038', 'wv063']
for z, channel in enumerate(channels):
x_conds = x_cond[0,:, z:z+1]
x_split = x[0,:, z:z+1]
sample_split = sample[:, z:z+1]
prediction_split = prediction[:, z:z+1]
save_single_video(x_conds, sample_path, f'{channel}_input.png', grid_size, to_normal=self.config.data.dataset_config.to_normal)
save_single_video(x_split, sample_path, f'{channel}_target.png', grid_size, to_normal=self.config.data.dataset_config.to_normal)
save_single_video(prediction_split, sample_path, f'{channel}_deter.png', grid_size, to_normal=self.config.data.dataset_config.to_normal)
save_single_video(sample_split, sample_path, f'{channel}_proba.png', grid_size, to_normal=self.config.data.dataset_config.to_normal)
if stage == 'val':
target = torch.clamp(((x_split+1)/2), 0, 1).unsqueeze(0).cpu().numpy()
prediction_split = torch.clamp(((prediction_split+1)/2), 0, 1).unsqueeze(0).cpu().numpy()
sample_split = torch.clamp(((sample_split+1)/2), 0, 1).unsqueeze(0).cpu().numpy()
mse_, mae_, ssim_, psnr_ = metric(prediction_split, target, mean=0, std=1, return_ssim_psnr=True)
mse_2, mae_2, ssim_2, psnr_2 = metric(sample_split, target, mean=0, std=1, return_ssim_psnr=True)
print(f"=======================================")
print(f"{channel}_Deterministic MAE : {mae_:.2f}, MSE : {mse_:.2f}, SSIM : {ssim_:.4f}, PSNR : {psnr_:.2f}")
print(f"{channel}_Probabilistic MAE : {mae_2:.2f}, MSE : {mse_2:.2f}, SSIM : {ssim_2:.4f}, PSNR : {psnr_2:.2f}")
if write:
self.writer.add_scalar(f'val_step/{channel}_deter_MSE', mse_, self.global_step)
self.writer.add_scalar(f'val_step/{channel}_deter_MAE', mae_, self.global_step)
self.writer.add_scalar(f'val_step/{channel}_deter_SSIM', ssim_, self.global_step)
self.writer.add_scalar(f'val_step/{channel}_deter_PSNR', psnr_, self.global_step)
self.writer.add_scalar(f'val_step/{channel}_prob_MSE', mse_2, self.global_step)
self.writer.add_scalar(f'val_step/{channel}_prob_MAE', mae_2, self.global_step)
self.writer.add_scalar(f'val_step/{channel}_prob_SSIM', ssim_2, self.global_step)
self.writer.add_scalar(f'val_step/{channel}_prob_PSNR', psnr_2, self.global_step)
@torch.no_grad()
def sample_to_eval(self, net, test_loader, sample_path, save_frame=True):
inputs_path = make_dir(os.path.join(sample_path, 'input'))
target_path = make_dir(os.path.join(sample_path, 'target'))
deter_path = make_dir(os.path.join(sample_path, 'deter'))
prob_path = make_dir(os.path.join(sample_path, 'prob'))
total_path = make_dir(os.path.join(sample_path, 'Total'))
if save_frame:
frame_path = sample_path.replace('sample_to_eval', 'sample_frame')
inputs_frame = make_dir(os.path.join(frame_path, 'input'))
target_frame = make_dir(os.path.join(frame_path, 'target'))
deter_frame = make_dir(os.path.join(frame_path, 'deter'))
prob_frame = make_dir(os.path.join(frame_path, 'prob'))
pbar = tqdm(test_loader, total=len(test_loader), smoothing=0.01)
batch_size = self.config.data.test.batch_size
grid_size = self.config.data.dataset_config.in_frames
to_normal = self.config.data.dataset_config.to_normal
sample_num = self.config.testing.sample_num
channels = self.config.data.channels
real_embeddings = [[] for _ in range(len(channels))]
det_embeddings = [[] for _ in range(len(channels))]
fake_embeddings = [[[] for _ in range(sample_num)] for _ in range(len(channels))]
MAE = [AverageMeter() for _ in range(len(channels))]
MSE = [AverageMeter() for _ in range(len(channels))]
PSNR = [AverageMeter() for _ in range(len(channels))]
SSIM = [AverageMeter() for _ in range(len(channels))]
LPIPS = [AverageMeter() for _ in range(len(channels))]
MAE2 = [AverageMeter() for _ in range(len(channels))]
MSE2 = [AverageMeter() for _ in range(len(channels))]
PSNR2 = [AverageMeter() for _ in range(len(channels))]
SSIM2 = [AverageMeter() for _ in range(len(channels))]
LPIPS2 = [AverageMeter() for _ in range(len(channels))]
idx = 0
loss_fn = lpips.LPIPS().cuda()
i3d = load_i3d_pretrained().cuda()
# FVD
def to_i3d(x):
if x.size(1) == 1:
x = x.repeat(1, 3, 1, 1) # hack for greyscale images
x = x.unsqueeze(0).permute(0, 2, 1, 3, 4) # BTCHW -> BCTHW
return x
for test_batch in pbar:
if idx >= 1000 and False:
break
x, x_cond = test_batch
b, f, c, h, w = x.shape
for j in range(sample_num): # iteration
sample = net.sample(x_cond, clip_denoised=False)
sample, pred = sample[0].reshape(b, f, c, h, w), sample[1].reshape(b, f, c, h, w)
for i in range(batch_size):
input_b = x_cond[i]
target_b = x[i]
sample_b = sample[i]
pred_b = pred[i]
for z, channel in enumerate(channels):
inputs_split = input_b[:, z:z+1]
target_split = target_b[:, z:z+1]
sample_split = sample_b[:, z:z+1]
prediction_split = pred_b[:, z:z+1]
names = idx + i
# save frame
if save_frame:
if j == 0:
save_frames(inputs_split, inputs_frame, f'{names:06}_{channel}_input.png', grid_size, to_normal=to_normal)
save_frames(target_split, target_frame, f'{names:06}_{channel}_target.png', grid_size, to_normal=to_normal)
save_frames(prediction_split, deter_frame, f'{names:06}_{channel}_deter.png', grid_size, to_normal=to_normal)
save_frames(sample_split, prob_frame, f'{names:06}_{channel}_{j}_proba.png', grid_size, to_normal=to_normal)
# save gif
if j == 0:
images = [(frames * 127.5 + 127.5)[0].detach().cpu().numpy() for frames in inputs_split]
imageio.mimsave(os.path.join(inputs_frame, f'{names:06}_{channel}.gif'), images, loop=0)
images = [(frames * 127.5 + 127.5)[0].detach().cpu().numpy() for frames in target_split]
imageio.mimsave(os.path.join(target_frame, f'{names:06}_{channel}.gif'), images, loop=0)
images = [(frames * 127.5 + 127.5)[0].detach().cpu().numpy() for frames in prediction_split]
imageio.mimsave(os.path.join(deter_frame, f'{names:06}_{channel}.gif'), images, loop=0)
images = [(frames * 127.5 + 127.5)[0].detach().cpu().numpy() for frames in sample_split]
imageio.mimsave(os.path.join(prob_frame, f'{names:06}_{channel}_{j}.gif'), images, loop=0)
# save one png
if j == 0:
save_single_video(inputs_split, inputs_path, f'{names:06}_{channel}_input.png', grid_size, to_normal=to_normal)
save_single_video(target_split, target_path, f'{names:06}_{channel}_target.png', grid_size, to_normal=to_normal)
save_single_video(prediction_split, deter_path, f'{names:06}_{channel}_deter.png', grid_size, to_normal=to_normal)
save_single_video(sample_split, prob_path, f'{names:06}_{channel}_{j}_proba.png', grid_size, to_normal=to_normal)
save_single_video(torch.cat([inputs_split, target_split, prediction_split, sample_split], dim=0), total_path, f'{channel}_{names:06}_{j}_total.png', grid_size, to_normal=to_normal)
trues = torch.clamp(((target_split+1)/2), 0, 1)
deters = torch.clamp(((prediction_split+1)/2), 0, 1)
probs = torch.clamp(((sample_split+1)/2), 0, 1)
if j == 0:
real_embeddings[z].append(get_feats(to_i3d(trues), i3d))
det_embeddings[z].append(get_feats(to_i3d(deters), i3d))
fake_embeddings[z][j].append(get_feats(to_i3d(probs), i3d))
# Diffusion
mse_, mae_, ssim_, psnr_ = metric(probs.unsqueeze(0).cpu().numpy(), trues.unsqueeze(0).cpu().numpy(), mean=0, std=1, return_ssim_psnr=True)
lpips_ = loss_fn(probs, trues)
lpips_ = lpips_.mean().item()
MAE[z].update(mae_, 1)
MSE[z].update(mse_, 1)
SSIM[z].update(ssim_, 1)
PSNR[z].update(psnr_, 1)
LPIPS[z].update(lpips_, 1)
# Prediction
mse_, mae_, ssim_, psnr_ = metric(deters.unsqueeze(0).cpu().numpy(), trues.unsqueeze(0).cpu().numpy(), mean=0, std=1, return_ssim_psnr=True)
lpips_ = loss_fn(deters, trues)
lpips_ = lpips_.mean().item()
MAE2[z].update(mae_, 1)
MSE2[z].update(mse_, 1)
SSIM2[z].update(ssim_, 1)
PSNR2[z].update(psnr_, 1)
LPIPS2[z].update(lpips_, 1)
# TODO: per frame
# psnr_per = np.zeros(10)
# for f_idx in range(grid_size):
# mse = np.mean((preds.cpu().numpy()[f_idx]-trues.cpu().numpy()[f_idx])**2)
# psnr_per[f_idx] = - 10 * np.log10(mse)
# ssim_per = np.zeros(10)
# for f_idx in range(grid_size):
# ssim_per[f_idx] = cal_ssim(preds.cpu().numpy()[f_idx].swapaxes(0, 2), trues.cpu().numpy()[f_idx].swapaxes(0, 2), multichannel=True)
# mse_per = np.sum(np.mean((preds.unsqueeze(0).cpu().numpy() - trues.unsqueeze(0).cpu().numpy())**2, axis=(0)), axis=(1,2,3))
# mae_per = np.sum(np.mean(np.abs(preds.unsqueeze(0).cpu().numpy() - trues.unsqueeze(0).cpu().numpy()), axis=(0)), axis=(1,2,3))
# print(" ".join([str(i) for i in mse_per]), " ".join([str(i) for i in mae_per]), " ".join([str(i) for i in psnr_per]), " ".join([str(i) for i in ssim_per]), file=results)
idx += batch_size
if idx != 1 and (idx) % 1 == 0:
for z, channel in enumerate(channels):
real_embedding = np.concatenate(real_embeddings[z], axis=0)
fake_embedding = [np.concatenate(fake_embeddings[z][i], axis=0) for i in range(sample_num)]
det_embedding = np.concatenate(det_embeddings[z], axis=0)
print(f"--------[{channel}]--------")
print("Probabilistic FVD :", end=' ')
for k in range(sample_num):
fvd = compute_fvd(real_embedding, fake_embedding[k])
print(f"{fvd:.4f}", end=' ')
print("\nDeterministic FVD : {:.4f}".format(compute_fvd(real_embedding, det_embedding)))
print(f"Test [{idx}/{len(test_loader)*b}] MAE : {MAE[z].val:.3f} ({MAE[z].avg:.3f}), MSE : {MSE[z].val:.3f} ({MSE[z].avg:.3f}), SSIM : {SSIM[z].val:.3f} ({SSIM[z].avg:.3f}), PSNR : {PSNR[z].val:.3f} ({PSNR[z].avg:.3f}), LPIPS : {LPIPS[z].val:.3f} ({LPIPS[z].avg:.3f})")
print(f"Deterministic, MAE : {MAE2[z].val:.3f} ({MAE2[z].avg:.3f}), MSE : {MSE2[z].val:.3f} ({MSE2[z].avg:.3f}), SSIM : {SSIM2[z].val:.3f} ({SSIM2[z].avg:.3f}), PSNR : {PSNR2[z].val:.3f} ({PSNR2[z].avg:.3f}), LPIPS : {LPIPS2[z].val:.3f} ({LPIPS2[z].avg:.3f})")
print("------------------------")
# Path: utils.py
import argparse
import importlib
import omegaconf.dictconfig
from Register import Registers
from runners.BBDMRunner import BBDMRunner
def dict2namespace(config):
namespace = argparse.Namespace()
for key, value in config.items():
| if isinstance(value, dict) or isinstance(value, omegaconf.dictconfig.DictConfig): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: VinAIResearch/MISCA
# Path: trainer.py
class Trainer(object):
def __init__(self, args, collate, train_dataset=None, dev_dataset=None, test_dataset=None):
self.args = args
self.train_dataset = train_dataset
self.dev_dataset = dev_dataset
self.test_dataset = test_dataset
self.collate_fn = collate
args.n_chars = len(self.train_dataset.chars)
if 'bert' in self.args.model_type:
self.tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
train_dataset.load_bert(self.tokenizer)
dev_dataset.load_bert(self.tokenizer)
test_dataset.load_bert(self.tokenizer)
self.intent_label_lst = get_intent_labels(args)
self.slot_label_lst, self.hiers = get_slots_all(args)
self.pad_token_label_id = args.ignore_index
self.config_class, self.model_class, _ = MODEL_CLASSES[args.model_type]
if 'bert' in self.args.model_type:
self.config = self.config_class.from_pretrained(args.model_name_or_path, finetuning_task=args.task)
self.model = self.model_class.from_pretrained(
args.model_name_or_path,
config=self.config,
args=args,
intent_label_lst=self.intent_label_lst,
slot_label_lst=self.slot_label_lst,
slot_hier=self.hiers
)
else:
self.model = self.model_class(args, len(self.train_dataset.vocab), self.intent_label_lst, self.slot_label_lst, self.hiers)
if args.base_model:
model_state = self.model.state_dict()
pretrained_state = torch.load(os.path.join(args.base_model, 'model.bin'))
pretrained_state = { k:v for k,v in pretrained_state.items() if k in model_state and v.size() == model_state[k].size() }
model_state.update(pretrained_state)
self.model.load_state_dict(model_state)
self.device = "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
self.model.to(self.device)
def train(self):
train_sampler = RandomSampler(self.train_dataset)
train_dataloader = DataLoader(self.train_dataset, sampler=train_sampler, batch_size=self.args.train_batch_size, collate_fn=self.collate_fn)
writer = SummaryWriter(log_dir=self.args.model_dir)
if self.args.max_steps > 0:
t_total = self.args.max_steps
self.args.num_train_epochs = (
self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1
)
else:
t_total = len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs
print("check init")
results = self.evaluate("dev", -1)
print(results)
logfile = open(self.args.model_dir + "/" + self.args.logging, 'w')
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=t_total
)
if self.args.logging_steps < 0:
self.args.logging_steps = len(train_dataloader)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(self.train_dataset))
logger.info(" Num Epochs = %d", self.args.num_train_epochs)
logger.info(" Total train batch size = %d", self.args.train_batch_size)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
logger.info(" Logging steps = %d", self.args.logging_steps)
global_step = 0
tr_loss = 0.0
self.model.zero_grad()
best_sent = 0
best_slot = 0
train_iterator = trange(int(self.args.num_train_epochs), desc="Epoch")
early_stopping = EarlyStopping(patience=self.args.early_stopping, verbose=True)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", position=0, leave=True)
print("\nEpoch", _)
for step, batch in enumerate(epoch_iterator):
self.model.train()
batch = tuple(t.to(self.device) for t in batch[:-1]) + (batch[-1], ) # GPU or CPU
if 'bert' in self.args.model_type:
inputs = {
"input_ids": batch[0],
"attention_mask": batch[3],
"intent_label_ids": batch[5],
"slot_labels_ids": batch[6],
"token_type_ids": batch[4],
"heads": batch[2],
"seq_lens": batch[-1].cpu()
}
else:
inputs = {
"input_ids": batch[0],
"char_ids": batch[1],
"intent_label_ids": batch[2],
"slot_labels_ids": batch[3],
"seq_lens": batch[4],
}
outputs = self.model(**inputs)
total_loss, intent_loss, slot_loss, count_loss = outputs[0]
if self.args.gradient_accumulation_steps > 1:
total_loss = total_loss / self.args.gradient_accumulation_steps
if _ < self.args.num_train_epochs * self.args.only_intent:
total_loss = intent_loss + count_loss
total_loss.backward()
else:
total_loss.backward()
tr_loss += total_loss.item()
if (step + 1) % self.args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
self.model.zero_grad()
global_step += 1
if self.args.logging_steps > 0 and global_step % (self.args.logging_steps) == 0:
print("\nTuning metrics:", self.args.tuning_metric)
results = self.evaluate("dev", _)
# self.evaluate("test")
writer.add_scalar("Loss/validation", results["loss"], _)
writer.add_scalar("Intent Accuracy/validation", results["intent_acc"], _)
writer.add_scalar("Intent F1", results["intent_f1"], _)
writer.add_scalar("Slot F1/validation", results["slot_f1"], _)
writer.add_scalar("Mean Intent Slot", results["mean_intent_slot"], _)
writer.add_scalar("Sentence Accuracy/validation", results["semantic_frame_acc"], _)
if results['semantic_frame_acc'] >= best_sent or results['slot_f1'] >= best_slot:
best_sent = results['semantic_frame_acc']
best_slot = results['slot_f1']
self.save_model()
results = self.evaluate('test', _)
logfile.write('\n\nEPOCH = ' + str(_) + '\n')
for key in sorted(results.keys()):
to_write = " {key} = {value}".format(key=key, value=str(results[key]))
logfile.write(to_write)
logfile.write("\n")
if 0 < self.args.max_steps < global_step:
epoch_iterator.close()
break
if 0 < self.args.max_steps < global_step or early_stopping.early_stop:
train_iterator.close()
break
writer.add_scalar("Loss/train", tr_loss / global_step, _)
logfile.close()
return global_step, tr_loss / global_step
def write_evaluation_result(self, out_file, results):
out_file = self.args.model_dir + "/" + out_file
w = open(out_file, "w", encoding="utf-8")
w.write("***** Eval results *****\n")
for key in sorted(results.keys()):
to_write = " {key} = {value}".format(key=key, value=str(results[key]))
w.write(to_write)
w.write("\n")
w.close()
def evaluate(self, mode, epoch):
if mode == "test":
dataset = self.test_dataset
elif mode == "dev":
dataset = self.dev_dataset
else:
raise Exception("Only dev and test dataset available")
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=self.args.eval_batch_size, collate_fn=self.collate_fn)
logger.info("***** Running evaluation on %s dataset *****", mode)
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", self.args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
slot_label_map = {i: label for i, label in enumerate(self.slot_label_lst)}
out_slot_label_list = []
slot_preds_list = []
predictions = []
intent_labels = []
int_len_gold = []
int_len_pred = []
results = {}
self.model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(self.device) for t in batch[:-1]) + (batch[-1], )
# print(batch)
with torch.no_grad():
if 'bert' in self.args.model_type:
inputs = {
"input_ids": batch[0],
"attention_mask": batch[3],
"intent_label_ids": batch[5],
"slot_labels_ids": batch[6],
"token_type_ids": batch[4],
"heads": batch[2],
"seq_lens": batch[-1].cpu()
}
else:
inputs = {
"input_ids": batch[0],
"char_ids": batch[1],
"intent_label_ids": batch[2],
"slot_labels_ids": batch[3],
"seq_lens": batch[4],
}
outputs = self.model(**inputs)
if self.args.num_intent_detection:
tmp_eval_loss, (intent_logits, slot_logits, intent_dec) = outputs[:2]
else:
tmp_eval_loss, (intent_logits, slot_logits) = outputs[:2]
eval_loss += tmp_eval_loss[0].mean().item()
nb_eval_steps += 1
# Intent prediction
intent_logits = F.logsigmoid(intent_logits).detach().cpu()
intent_preds = intent_logits.numpy()
if self.args.num_intent_detection:
intent_nums = intent_dec.detach().cpu().numpy()
out_intent_label_ids = inputs["intent_label_ids"].detach().cpu().numpy()
intent_labels.extend(out_intent_label_ids.tolist())
# Slot prediction
if self.args.use_crf:
slot_preds = np.array(self.model.crf.decode(slot_logits))
else:
slot_preds = slot_logits.detach().cpu()
out_slot_labels_ids = inputs["slot_labels_ids"].detach().cpu().numpy()
cur = []
if self.args.num_intent_detection:
num_intents = intent_logits.size(1)
intent_nums = np.argmax(intent_nums, axis=-1)
gold_nums = np.sum(out_intent_label_ids, axis=-1)
int_len_gold.extend(gold_nums.tolist())
int_len_pred.extend(intent_nums.tolist())
for num, preds in zip(intent_nums, intent_preds):
idx = preds.argsort()[-num:]
p = np.zeros(num_intents)
p[idx] = 1.
predictions.append(p)
cur.append(p)
else:
predictions.extend(np.rint(intent_preds).tolist())
if not self.args.use_crf:
slot_preds_arg = np.argmax(slot_preds.numpy(), axis=2)
else:
slot_preds_arg = slot_preds
for i in range(out_slot_labels_ids.shape[0]):
slt = None
out_slot_label_list.append([])
slot_preds_list.append([])
for j in range(out_slot_labels_ids.shape[1]):
if out_slot_labels_ids[i, j] != self.pad_token_label_id:
out_slot_label_list[-1].append(slot_label_map[out_slot_labels_ids[i][j]])
predict_label = slot_label_map[slot_preds_arg[i][j]]
if predict_label[:2] == 'B-':
slt = predict_label[2:]
elif predict_label[:2] == 'I-':
if slt is None:
predict_label = 'O'
elif slt != predict_label[2:]:
predict_label = 'O'
else:
slt = None
slot_preds_list[-1].append(predict_label)
eval_loss = eval_loss / nb_eval_steps
results['loss'] = eval_loss
predictions = np.array(predictions)
intent_labels = np.array(intent_labels)
total_result = compute_metrics(predictions, intent_labels, slot_preds_list, out_slot_label_list)
results.update(total_result)
int_len_gold = np.array(int_len_gold)
int_len_pred = np.array(int_len_pred)
results['num_acc'] = (int_len_gold == int_len_pred).mean()
results['epoch'] = epoch
logger.info("***** Eval results *****")
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
if mode == "test":
self.write_evaluation_result("eval_test_results.txt", results)
elif mode == "dev":
self.write_evaluation_result("eval_dev_results.txt", results)
return results
def save_model(self):
# Save model checkpoint (Overwrite)
if not os.path.exists(self.args.model_dir):
os.makedirs(self.args.model_dir)
model_to_save = self.model.module if hasattr(self.model, "module") else self.model
torch.save(model_to_save, os.path.join(self.args.model_dir, 'model.bin'))
# Save training arguments together with the trained model
torch.save(self.args, os.path.join(self.args.model_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", self.args.model_dir)
def load_model(self):
# Check whether model exists
if not os.path.exists(self.args.model_dir):
raise Exception("Model doesn't exists! Train first!")
try:
self.model.load_state_dict(torch.load(os.path.join(self.args.model_dir, 'model.bin')), strict=False)
self.model.to(self.device)
logger.info("***** Model Loaded *****")
except Exception:
raise Exception("Some model files might be missing...")
# Path: utils.py
def init_logger():
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
# Path: utils.py
def load_tokenizer(args):
return MODEL_CLASSES[args.model_type][2].from_pretrained(args.model_name_or_path)
# Path: utils.py
def read_prediction_text(args):
return [text.strip() for text in open(os.path.join(args.pred_dir, args.pred_input_file), 'r', encoding='utf-8')]
# Path: utils.py
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if not args.no_cuda and torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
# Path: utils.py
MODEL_CLASSES = {
"lstm": (None, JointLSTM, None),
"roberta": (RobertaConfig, JointRoberta, RobertaTokenizer)
}
# Path: utils.py
MODEL_PATH_MAP = {
"lstm": "",
"roberta": "roberta-base"
}
# Path: utils.py
def get_intent_labels(args):
return [label.strip() for label in open(os.path.join(args.data_dir, args.task, args.intent_label_file), 'r', encoding='utf-8')]
# Path: utils.py
def get_slots_all(args):
slot_labels = get_slot_labels(args)
hier = ()
if args.task == 'mixatis':
slot_parents = get_clean_labels(args)
hier = (slot_parents, )
slot_type = sorted(set([name[2:] for name in slot_labels if name[:2] == 'B-' or name[:2] == 'I-']))
hier += (slot_type, )
return slot_labels, hier
# Path: data_loader.py
class TextLoader(Dataset):
def __init__(self, args, mode):
self.args = args
self.intent_labels = get_intent_labels(args)
self.slot_labels, self.hiers = get_slots_all(args)
self.vocab = Vocab(min_freq=self.args.min_freq)
self.chars = Vocab()
self.examples = self.build(mode)
def load_bert(self, tokenizer):
pad_token_label_id = self.args.ignore_index
self.examples = convert_examples_to_features(self.examples, self.args.max_seq_len, tokenizer,
pad_token_label_id=pad_token_label_id)
@classmethod
def read_file(cls, input_file, quotechar=None):
""" Read data file of given path.
:param file_path: path of data file.
:return: list of sentence, list of slot and list of intent.
"""
texts, slots, intents = [], [], []
text, slot = [], []
with open(input_file, 'r', encoding="utf8") as fr:
for line in fr.readlines():
items = line.strip().split()
if len(items) == 1:
texts.append(text)
slots.append(slot)
if "/" not in items[0]:
intents.append(items)
else:
new = items[0].split("/")
intents.append([new[1]])
# clear buffer lists.
text, slot = [], []
elif len(items) == 2:
text.append(items[0].strip())
slot.append(items[1].strip())
return texts, slots, intents
def _create_examples(self, texts, chars, intents, slots, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for i, (text, char, intent, slot) in enumerate(zip(texts, chars, intents, slots)):
guid = "%s-%s" % (set_type, i)
# 1. input_text
words = self.vocab.get_index(text) # Some are spaced twice
words = [self.vocab.start_index] + words + [self.vocab.end_index]
# char
char = self.chars.get_index(char)
max_char = max([len(x) for x in char])
for j in range(len(char)):
char[j] = char[j] + [0] * (max_char - len(char[j]))
char = [[0] * max_char] + char + [[0] * max_char]
# 2. intent
_intent = intent[0].split('#')
intent_label = [0 for _ in self.intent_labels]
for _int in _intent:
idx = self.intent_labels.index(_int) if _int in self.intent_labels else self.intent_labels.index("UNK")
intent_label[idx] = 1
# 3. slot
slot_labels = []
for s in slot:
slot_labels.append(self.slot_labels.index(s) if s in self.slot_labels else self.slot_labels.index("UNK"))
slot_labels = [self.slot_labels.index('PAD')] + slot_labels + [self.slot_labels.index('PAD')]
assert len(words) == len(slot_labels)
examples.append(InputExample(guid=guid, words=words, chars=char, intent_label=intent_label, slot_labels=slot_labels, text=text))
return examples
def build(self, mode):
data_path = os.path.join(self.args.data_dir, self.args.task, mode + '.txt')
logger.info("LOOKING AT {}".format(data_path))
texts, slots, intents = self.read_file(data_path)
chars = []
max_len = 0
for text in texts:
chars.append([])
for word in text:
chars[-1].append(list(word))
cache = os.path.join(self.args.data_dir, f'vocab_{self.args.task}')
if os.path.exists(cache):
self.vocab.load(cache)
elif mode == 'train':
self.vocab.add(texts)
self.vocab.save(cache)
cache_chars = os.path.join(self.args.data_dir, f'chars_{self.args.task}')
if os.path.exists(cache_chars):
self.chars.load(cache_chars)
elif mode == 'train':
self.chars.add(chars)
self.chars.save(cache_chars)
return self._create_examples(texts=texts,
chars=chars,
intents=intents,
slots=slots,
set_type=mode)
def __getitem__(self, index):
example = self.examples[index]
words = torch.tensor(example.words, dtype=torch.long)
intent = torch.tensor(example.intent_label, dtype=torch.float)
slot = torch.tensor(example.slot_labels, dtype=torch.long)
chars = torch.tensor(example.chars, dtype=torch.long)
if 'bert' in self.args.model_type:
attention_mask = torch.tensor(example.attention_mask, dtype=torch.long)
token_type_ids = torch.tensor(example.token_type_ids, dtype=torch.long)
heads = torch.tensor(example.heads, dtype=torch.long)
return (words, chars, heads, attention_mask, token_type_ids, intent, slot)
else:
return (words, chars, intent, slot)
def __len__(self):
return len(self.examples)
# Path: data_loader.py
class TextCollate():
def __init__(self, pad_index, num_intents, max_seq_len):
self.pad_index = pad_index
self.num_intents = num_intents
self.max_seq_len = max_seq_len
def __call__(self, batch):
len_list = [len(x[-1]) for x in batch]
len_char = [x[1].size(1) for x in batch]
max_len = max(len_list)
max_char = max(len_char)
seq_lens = []
bert = len(batch[0]) > 4
char_padded = torch.LongTensor(len(batch), max_len, max_char)
slot_padded = torch.LongTensor(len(batch), max_len)
intent = torch.FloatTensor(len(batch), self.num_intents)
char_padded.zero_()
intent.zero_()
slot_padded.zero_()
if not bert:
text_padded = torch.LongTensor(len(batch), max_len)
text_padded.zero_()
else:
input_ids = torch.LongTensor(len(batch), self.max_seq_len)
attention_mask = torch.LongTensor(len(batch), self.max_seq_len)
token_type_ids = torch.LongTensor(len(batch), self.max_seq_len)
heads = torch.LongTensor(len(batch), max_len)
input_ids.zero_()
attention_mask.zero_()
token_type_ids.zero_()
heads.zero_()
# Get sorted index of len_list.
sorted_index = np.argsort(len_list)[::-1]
for i, index in enumerate(sorted_index):
seq_lens.append(len_list[index])
intent[i] = batch[index][-2]
slot = batch[index][-1]
slot_padded[i, :slot.size(0)] = slot
char = batch[index][1]
char_padded[i, :char.size(0), :char.size(1)] = char
if not bert:
text = batch[index][0]
text_padded[i, :text.size(0)] = text
else:
input_ids[i] = batch[index][0]
attention_mask[i] = batch[index][3]
token_type_ids[i] = batch[index][4]
head = batch[index][2]
heads[i, :head.size(0)] = head
if not bert:
return text_padded, char_padded, intent, slot_padded, torch.tensor(seq_lens, dtype=torch.long)
else:
return input_ids, char_padded, heads, attention_mask, token_type_ids, intent, slot_padded, torch.tensor(seq_lens, dtype=torch.long)
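# A compact illustration of what TextCollate does above: sort the batch by descending length,
# then copy each sequence into a zero-initialized LongTensor padded to the batch maximum.
# The toy batch below is an assumption for demonstration only.
import torch
toy_batch = [torch.tensor([1, 2, 3]), torch.tensor([4, 5]), torch.tensor([6, 7, 8, 9])]
lengths = [len(x) for x in toy_batch]
order = sorted(range(len(toy_batch)), key=lambda i: lengths[i], reverse=True)
padded = torch.zeros(len(toy_batch), max(lengths), dtype=torch.long)
for row, idx in enumerate(order):
    padded[row, :lengths[idx]] = toy_batch[idx]
print(padded)
# tensor([[6, 7, 8, 9],
#         [1, 2, 3, 0],
#         [4, 5, 0, 0]])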
# Path: main.py
import argparse
from trainer import Trainer
from utils import init_logger, load_tokenizer, read_prediction_text, set_seed, MODEL_CLASSES, MODEL_PATH_MAP, get_intent_labels, get_slots_all
from data_loader import TextLoader, TextCollate
def main(args):
init_logger()
set_seed(args)
slot_label_lst, hiers = get_slots_all(args)
collate = TextCollate(0, len(get_intent_labels(args)), args.max_seq_len)
train_dataset = TextLoader(args, 'train')
dev_dataset = TextLoader(args, 'dev')
test_dataset = TextLoader(args, 'test')
trainer = Trainer(args, collate, train_dataset, dev_dataset, test_dataset)
if args.do_train:
trainer.train()
if args.do_eval:
trainer.load_model()
trainer.evaluate('dev', 0)
trainer.evaluate("test", -1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--task", default=None, required=True, type=str, help="The name of the task to train")
parser.add_argument("--model_dir", default=None, required=True, type=str, help="Path to save, load model")
parser.add_argument("--data_dir", default="./data", type=str, help="The input data dir")
parser.add_argument("--intent_label_file", default="intent_label.txt", type=str, help="Intent Label file")
parser.add_argument("--slot_label_file", default="slot_label.txt", type=str, help="Slot Label file")
parser.add_argument("--slot_label_clean", default="slot_clean.txt", type=str, help="Slot Label file")
parser.add_argument("--logging", default="log.txt", type=str, help="Logging file")
# LAAT
parser.add_argument("--n_levels", default=1, type=int, help="Number of attention")
parser.add_argument("--attention_mode", default=None, type=str)
parser.add_argument("--level_projection_size", default=32, type=int)
parser.add_argument("--d_a", default=-1, type=int)
parser.add_argument("--char_embed", default=64, type=int)
parser.add_argument("--char_out", default=64, type=int)
parser.add_argument("--use_charcnn", action="store_false", help="Whether to use CharCNN")
parser.add_argument("--use_charlstm", action="store_false", help="Whether to use CharLSTM")
parser.add_argument("--word_embedding_dim", default=128, type=int)
parser.add_argument("--encoder_hidden_dim", default=128, type=int)
parser.add_argument("--decoder_hidden_dim", default=256, type=int)
parser.add_argument("--attention_hidden_dim", default=256, type=int)
parser.add_argument("--attention_output_dim", default=256, type=int)
# Config training
parser.add_argument("--model_type", default="bert", type=str, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument('--seed', type=int, default=1234, help="random seed for initialization")
parser.add_argument("--train_batch_size", default=32, type=int, help="Batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int, help="Batch size for evaluation.")
parser.add_argument("--max_seq_len", default=100, type=int, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=50, type=float, help="Total number of training epochs to perform.")
parser.add_argument("--weight_decay", default=0, type=float, help="Weight decay if we apply some.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
| parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zyc00/PartSLIP2
# Path: src/utils.py
def normalize_pc(pc_file, save_dir, io, device, save_normalized_pc=False):
pc = io.load_pointcloud(pc_file, device = device)
xyz = pc.points_padded().reshape(-1,3)
rgb = pc.features_padded().reshape(-1,3)
xyz = xyz - xyz.mean(axis=0)
xyz = xyz / torch.norm(xyz, dim=1, p=2).max().item()
xyz = xyz.cpu().numpy()
rgb = rgb.cpu().numpy()
if save_normalized_pc:
save_colored_pc(os.path.join(save_dir, "normalized_pc.ply"), xyz, rgb)
return xyz, rgb
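# normalize_pc above centers the cloud at the origin and scales it into the unit sphere:
# xyz <- (xyz - mean) / max ||xyz - mean||_2. A small numpy check of that invariant, using a
# random cloud as an assumption:
import numpy as np
pts = np.random.rand(1000, 3)
pts = pts - pts.mean(axis=0)
pts = pts / np.linalg.norm(pts, axis=1).max()
print(np.linalg.norm(pts, axis=1).max())  # 1.0 up to floating-point error
print(np.abs(pts.mean(axis=0)).max())     # ~0: dividing by a scalar preserves the zero mean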
# Path: src/render_pc.py
def render_pc(xyz, rgb, save_dir, device):
pc = Pointclouds(points=[torch.Tensor(xyz).to(device)],
features=[torch.Tensor(rgb).to(device)])
#pc = io.load_pointcloud(pc_file, device=device)
img_dir = os.path.join(save_dir, "rendered_img")
os.makedirs(img_dir, exist_ok=True)
indices = [0, 4, 7, 1, 5, 2, 8, 6, 3, 9]
views = [[10, 0], [10, 90], [10, 180], [10, 270], [40, 0], [40, 120], [40, 240], [-20, 60], [-20, 180], [-20, 300]]
pc_idx_list = []
screen_coords_list = []
for i, view in enumerate(views):
img, pc_idx, screen_coords = render_single_view(pc, view, device, camera_distance=2.2)
plt.imsave(os.path.join(img_dir, f"{i}.png"), img[0, ..., :3].cpu().numpy() * 0.99999)
pc_idx_list.append(pc_idx)
screen_coords_list.append(screen_coords)
pc_idx = torch.cat(pc_idx_list, dim=0).squeeze()
screen_coords = torch.cat(screen_coords_list, dim=0).reshape(len(views),-1, 3)[...,:2]
np.save(f"{save_dir}/idx.npy", pc_idx.cpu().numpy())
np.save(f"{save_dir}/coor.npy", screen_coords.cpu().numpy())
return img_dir, pc_idx.cpu().numpy(), screen_coords.cpu().numpy(), len(views)
# Path: src/glip_inference.py
def glip_inference(glip_demo, save_dir, part_names, sam_predictor, num_views=10,
save_pred_img=True, save_individual_img=False, save_pred_json=False):
pred_dir = os.path.join(save_dir, "glip_pred")
os.makedirs(pred_dir, exist_ok = True)
seg_masks = [[] for _ in range(num_views)]
preds = [[] for _ in range(num_views)]
for i in range(num_views):
image = load_img("%s/rendered_img/%d.png" % (save_dir, i))
result, top_predictions = glip_demo.run_on_web_image(image, part_names, 0.5)
if save_pred_img:
plt.imsave("%s/%d.png" % (pred_dir, i), result[:, :, [2, 1, 0]])
bbox = top_predictions.bbox.cpu().numpy()
score = top_predictions.get_field("scores").cpu().numpy()
labels = top_predictions.get_field("labels").cpu().numpy()
        if save_individual_img:
            # NOTE: inside this function the name save_individual_img is the boolean parameter,
            # so this call would raise TypeError whenever the flag is True; the flag would need
            # a distinct name for the helper call to work.
            save_individual_img(image, bbox, labels, len(part_names), pred_dir, i)
for j in range(len(bbox)):
x1, y1, x2, y2 = bbox[j].tolist()
preds[i].append((np.array([x1, y1, x2, y2]), labels[j].item() - 1))
for i in range(num_views):
image = load_img("%s/rendered_img/%d.png" % (save_dir, i))
sam_predictor.set_image(image)
preds_view = preds[i]
for pred in preds_view:
bbox, cat_id = pred
mask = segment(sam_predictor, bbox)
seg_masks[i].append((mask, cat_id, bbox))
return seg_masks
# Path: src/glip_inference.py
def load_model(config_file, weight_file):
cfg.local_rank = 0
cfg.num_gpus = 1
cfg.merge_from_file(config_file)
cfg.merge_from_list(["MODEL.WEIGHT", weight_file])
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
glip_demo = GLIPDemo(
cfg,
min_image_size=800,
confidence_threshold=0.7,
show_mask_heatmaps=False
)
return glip_demo
# Path: src/bbox2seg.py
def bbox2seg(xyz, superpoint, masks, screen_coor_all, point_idx_all, part_names,
save_dir,
num_view=10, solve_instance_seg=True, visualize=True):
print("semantic segmentation...")
n_category = len(part_names)
n_sp = len(superpoint)
sp_visible_cnt = np.zeros(n_sp) #visible points for each superpoint
sp_bbox_visible_cnt = np.zeros((n_category, n_sp))
    # visible points of superpoint j that are covered by at least one bounding box of category i
masks_per_view = masks
in_mask_ratio_list = [[[] for j in range(n_sp)] for i in range(n_category)] #used for instance segmentation
visible_pts_list = []
view_cat_bbox = [[[] for _ in range(len(part_names))] for _ in range(num_view)]
for i in range(num_view):
screen_coor = screen_coor_all[i] #2D projected location of each 3D point
point_idx = point_idx_all[i] #point index of each 2D pixel
visible_pts = np.unique(point_idx)[1:] # the first one is -1
visible_pts_list.append(visible_pts)
valid_masks = []
mask_2d = np.zeros([point_idx_all.shape[1], point_idx_all.shape[2], len(part_names)])
for mask_cat in masks_per_view[i]:
mask, cat_id, bbox = mask_cat
view_cat_bbox[i][cat_id].append(bbox)
mask_points = np.nonzero(mask)
x1, x2 = np.asarray(mask_points[1]).min(), np.asarray(mask_points[1]).max()
y1, y2 = np.asarray(mask_points[0]).min(), np.asarray(mask_points[0]).max()
if check_pc_within_bbox(x1, y1, x2, y2, np.int16(screen_coor)).mean() < 0.98:
                # ignore bboxes that cover the whole object
valid_masks.append(mask_cat)
mask_2d[..., cat_id] = np.logical_or(mask, mask_2d[..., cat_id])
# visualize SAM result
os.makedirs(f"{save_dir}/sam_result/", exist_ok=True)
image = load_img(f"{save_dir}/rendered_img/{i}.png")
for cat_id in range(len(part_names)):
masked_image = image * (1 - mask_2d[..., cat_id][..., None]) + \
mask_2d[..., cat_id][..., None] * np.array([255, 0, 0])
for bbox in view_cat_bbox[i][cat_id]:
bbox = np.int16(bbox)
masked_image = draw_rectangle(masked_image, bbox[0], bbox[1], bbox[2], bbox[3])
plt.imsave(f"{save_dir}/sam_result/{part_names[cat_id]}_{i}.jpg", np.uint8(masked_image))
view_idx = point_idx_all[i]
for k, sp in enumerate(superpoint):
sp_visible_pts = intersection(sp, visible_pts)
sp_visible_cnt[k] += len(sp_visible_pts)
in_bbox = np.zeros((n_category, len(sp_visible_pts)), dtype=bool)
if len(sp_visible_pts) != 0:
sp_coor = screen_coor[sp_visible_pts]
bb1 = {'x1': sp_coor[:, 0].min(), 'y1': sp_coor[:, 1].min(), \
'x2': sp_coor[:, 0].max(), 'y2': sp_coor[:, 1].max()}
for mask_cat in valid_masks:
mask, cat_id, bbox = mask_cat
mask_points = np.nonzero(mask)
x1, x2 = np.asarray(mask_points[1]).min(), np.asarray(mask_points[1]).max()
y1, y2 = np.asarray(mask_points[0]).min(), np.asarray(mask_points[0]).max()
if len(sp_visible_pts) == 0:
in_mask_ratio_list[cat_id][k].append(-1)
else:
bb2 = {'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2}
if get_iou(bb1, bb2) < 1e-6:
in_mask_ratio_list[cat_id][k].append(0)
else:
# point_seg_mask = check_pc_within_bbox(x1, y1, x2, y2, np.int16(sp_coor))
point_seg_mask = check_pc_within_seg_mask(mask, np.int16(sp_coor))
in_bbox[cat_id] = np.logical_or(in_bbox[cat_id], point_seg_mask)
in_mask_ratio_list[cat_id][k].append(point_seg_mask.mean())
for j in range(n_category):
sp_bbox_visible_cnt[j, k] += in_bbox[j].sum()
sem_score = sp_bbox_visible_cnt / (sp_visible_cnt.reshape(1, -1) + 1e-6)
sem_score[:, sp_visible_cnt == 0] = 0
sem_seg = np.ones(xyz.shape[0], dtype=np.int32) * -1
# assign semantic labels to superpoints
for i in range(n_sp):
if sem_score[:, i].max() < 0.5:
continue
idx = -1
for j in reversed(range(n_category)): #give priority to small parts
if sem_score[j, i] >= 0.5 and part_names[j] in ["handle", "button", "wheel", "knob", "switch", "bulb", "shaft", "touchpad", "camera", "screw"]:
idx = j
break
if idx == -1:
idx = np.argmax(sem_score[:, i])
sem_seg[superpoint[i]] = idx
if visualize:
os.makedirs("%s/semantic_seg" % save_dir, exist_ok=True)
for j in range(n_category):
rgb_sem = np.ones((xyz.shape[0], 3)) * (sem_seg == j).reshape(-1, 1)
save_colored_pc("%s/semantic_seg/%s.ply" % (save_dir, part_names[j]), xyz, rgb_sem)
if solve_instance_seg == False:
return sem_seg, None
print("instance segmentation...")
os.makedirs("%s/instance_seg" % save_dir, exist_ok=True)
connectivity = calc_sp_connectivity(xyz, superpoint)
ins_seg = np.ones(xyz.shape[0], dtype=np.int32) * -1
ins_cnt = 0
for j in range(n_category):
f = []
for i in range(n_sp): # initialize union-find sets
f.append(i)
# merge superpoints that are adjacent and have similar bounding box ratio
for i in range(n_sp):
if sem_seg[superpoint[i][0]] == j:
for k in range(i):
if sem_seg[superpoint[k][0]] == j and connectivity[i][k]:
ratio_i = np.array(in_mask_ratio_list[j][i])
ratio_k = np.array(in_mask_ratio_list[j][k])
mask = np.logical_and(ratio_i > -1, ratio_k > -1) # -1 indicates invisible
if mask.sum() == 0 or max(ratio_i[mask].sum(), ratio_k[mask].sum()) < 1e-3:
dis = 1
else:
dis = np.abs(ratio_i[mask] - ratio_k[mask]).sum()
dis /= max(ratio_i[mask].sum(), ratio_k[mask].sum())
l1 = len(superpoint[i])
l2 = len(superpoint[k])
if dis < 0.2 and max(l1, l2) / min(l1, l2) < 100:
f[get_union(f, i)] = get_union(f, k) # merge two union-find sets
instances = []
flags = []
merged_sps = [[] for i in range(n_sp)]
for i in range(n_sp):
merged_sps[get_union(f, i)].append(superpoint[i])
for i in range(n_sp):
if len(merged_sps[i]) > 0 and sem_seg[superpoint[i][0]] == j:
instances.append(np.concatenate(merged_sps[i]))
flags.append(False)
#filter out instances that have small iou with all bounding boxes
for i in range(num_view):
screen_coor = screen_coor_all[i] #2D projected location of each 3D point
visible_pts = visible_pts_list[i]
for k, instance in enumerate(instances):
if flags[k]:
continue
ins_visible_pts = intersection(instance, visible_pts)
if len(ins_visible_pts) == 0:
continue
ins_coor = screen_coor[ins_visible_pts]
bb1 = {'x1': ins_coor[:, 0].min(), 'y1': ins_coor[:, 1].min(), \
'x2': ins_coor[:, 0].max(), 'y2': ins_coor[:, 1].max()}
for mask_cat in masks_per_view[i]:
mask, cat_id, bbox = mask_cat
if cat_id != j:
continue
mask_points = np.nonzero(mask)
x1, x2 = np.asarray(mask_points[1]).min(), np.asarray(mask_points[1]).max()
y1, y2 = np.asarray(mask_points[0]).min(), np.asarray(mask_points[0]).max()
bb2 = {'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2}
if get_iou(bb1, bb2) > 0.5:
flags[k] = True
break
rgb_ins = np.zeros((xyz.shape[0], 3))
for i in range(len(instances)):
if flags[i]:
ins_seg[instances[i]] = ins_cnt
ins_cnt += 1
rgb_ins[instances[i]] = np.random.rand(3)
if visualize:
save_colored_pc("%s/instance_seg/%s.ply" % (save_dir, part_names[j]), xyz, rgb_ins)
return sem_seg, ins_seg
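# bbox2seg above merges superpoints with a union-find helper get_union(f, i) that is not shown
# in this excerpt (f[get_union(f, i)] = get_union(f, k)). A typical find-with-path-compression
# sketch of such a helper, offered as an assumption rather than the repository's definition:
def get_union_sketch(f, x):
    root = x
    while f[root] != root:
        root = f[root]
    while f[x] != root:  # path compression
        f[x], x = root, f[x]
    return root
f = list(range(5))
f[get_union_sketch(f, 0)] = get_union_sketch(f, 1)  # merge superpoints 0 and 1
f[get_union_sketch(f, 3)] = get_union_sketch(f, 4)  # merge superpoints 3 and 4
print(get_union_sketch(f, 0) == get_union_sketch(f, 1))  # True
print(get_union_sketch(f, 0) == get_union_sketch(f, 3))  # False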
# Path: run_partslip.py
import os
import torch
import json
import numpy as np
from pytorch3d.io import IO
from src.utils import normalize_pc
from src.render_pc import render_pc
from src.glip_inference import glip_inference, load_model
from src.bbox2seg import bbox2seg
from segment_anything import sam_model_registry, SamPredictor
def Infer(input_pc_file, category, model, part_names, zero_shot=False, save_dir="tmp"):
if zero_shot:
config ="GLIP/configs/glip_Swin_L.yaml"
weight_path = "./models/glip_large_model.pth"
print("-----Zero-shot inference of %s-----" % input_pc_file)
else:
config ="GLIP/configs/glip_Swin_L_pt.yaml"
weight_path = "./models/%s.pth" % category
print("-----Few-shot inference of %s-----" % input_pc_file)
print("[loading GLIP model...]")
glip_demo = load_model(config, weight_path)
print("[creating tmp dir...]")
if torch.cuda.is_available():
device = torch.device("cuda:0")
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
io = IO()
os.makedirs(save_dir, exist_ok=True)
print(save_dir)
print("[normalizing input point cloud...]")
xyz, rgb = normalize_pc(input_pc_file, save_dir, io, device)
print("[rendering input point cloud...]")
img_dir, pc_idx, screen_coords, num_views = render_pc(xyz, rgb, save_dir, device)
print("[glip infrence...]")
SAM_ENCODER_VERSION = "vit_h"
SAM_CHECKPOINT_PATH = "./models/sam_vit_h_4b8939.pth"
sam = sam_model_registry[SAM_ENCODER_VERSION](checkpoint=SAM_CHECKPOINT_PATH)
sam.to(device=torch.device("cuda:0"))
sam_predictor = SamPredictor(sam)
masks = glip_inference(glip_demo, save_dir, part_names, sam_predictor, num_views=num_views)
| print('[generating superpoints...]') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: fzmi/ubdd
# Path: models/dino/util/misc.py
def inverse_sigmoid(x, eps=1e-3):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1/x2)
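# A quick round-trip check of inverse_sigmoid above (assuming the function is in scope): for
# inputs away from the eps clamp, sigmoid(inverse_sigmoid(x)) recovers x, since
# log(x1 / x2) = log(x / (1 - x)) is the logit function.
import torch
x = torch.tensor([0.01, 0.25, 0.5, 0.75, 0.99])
print(torch.allclose(torch.sigmoid(inverse_sigmoid(x)), x, atol=1e-4))  # True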
# Path: models/dino/models/dino/utils.py
def gen_encoder_output_proposals(memory:Tensor, memory_padding_mask:Tensor, spatial_shapes:Tensor, learnedwh=None):
"""
Input:
- memory: bs, \sum{hw}, d_model
- memory_padding_mask: bs, \sum{hw}
- spatial_shapes: nlevel, 2
- learnedwh: 2
Output:
- output_memory: bs, \sum{hw}, d_model
- output_proposals: bs, \sum{hw}, 4
"""
N_, S_, C_ = memory.shape
base_scale = 4.0
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
if learnedwh is not None:
wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0 ** lvl)
else:
wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += (H_ * W_)
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid
output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
output_memory = memory
output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
return output_memory, output_proposals
# Path: models/dino/models/dino/utils.py
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
# Path: models/dino/models/dino/utils.py
def _get_activation_fn(activation, d_model=256, batch_dim=0):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
if activation == "prelu":
return nn.PReLU()
if activation == "selu":
return F.selu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
# Path: models/dino/models/dino/utils.py
def gen_sineembed_for_position(pos_tensor):
# n_query, bs, _ = pos_tensor.size()
# sineembed_tensor = torch.zeros(n_query, bs, 256)
scale = 2 * math.pi
dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
dim_t = 10000 ** (2 * (dim_t // 2) / 128)
x_embed = pos_tensor[:, :, 0] * scale
y_embed = pos_tensor[:, :, 1] * scale
pos_x = x_embed[:, :, None] / dim_t
pos_y = y_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
if pos_tensor.size(-1) == 2:
pos = torch.cat((pos_y, pos_x), dim=2)
elif pos_tensor.size(-1) == 4:
w_embed = pos_tensor[:, :, 2] * scale
pos_w = w_embed[:, :, None] / dim_t
pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
h_embed = pos_tensor[:, :, 3] * scale
pos_h = h_embed[:, :, None] / dim_t
pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
else:
raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
return pos
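# --- Illustrative aside (standalone sketch, not repository code): the sine embedding above
# maps each scalar coordinate to 128 channels of interleaved sin/cos, so a 2-d (x, y)
# position yields a 256-d embedding and a 4-d (x, y, w, h) box would yield 512-d.
import math
import torch

def _demo_sine_embed_1d(coord):                    # coord: (n_query, bs)
    dim_t = torch.arange(128, dtype=torch.float32)
    dim_t = 10000 ** (2 * (dim_t // 2) / 128)
    pos = coord[:, :, None] * (2 * math.pi) / dim_t           # (n_query, bs, 128)
    return torch.stack((pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3).flatten(2)

_xy = torch.rand(300, 2, 2)                        # (n_query, bs, 2) normalized positions
_emb = torch.cat([_demo_sine_embed_1d(_xy[:, :, 1]), _demo_sine_embed_1d(_xy[:, :, 0])], dim=2)
assert _emb.shape == (300, 2, 256)                 # y-embedding then x-embedding, 128 dims each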
# Path: models/dino/models/dino/ops/modules/ms_deform_attn.py
class MSDeformAttn(nn.Module):
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation.")
self.im2col_step = 64
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] \
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
# for amp
if value.dtype == torch.float16:
# for mixed precision
output = MSDeformAttnFunction.apply(
value.to(torch.float32), input_spatial_shapes, input_level_start_index, sampling_locations.to(torch.float32), attention_weights, self.im2col_step)
output = output.to(torch.float16)
output = self.output_proj(output)
return output
output = MSDeformAttnFunction.apply(
value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
output = self.output_proj(output)
return output
# Path: models/dino/models/dino/deformable_transformer.py
import math, random
import copy
import torch
from typing import Optional
from torch import nn, Tensor
from models.dino.util.misc import inverse_sigmoid
from .utils import gen_encoder_output_proposals, MLP,_get_activation_fn, gen_sineembed_for_position
from .ops.modules import MSDeformAttn
from .utils import RandomBoxPerturber
else:
dec_layer_share = False
assert layer_share_type is None
self.decoder_sa_type = decoder_sa_type
assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']
# choose encoder layer type
if deformable_encoder:
encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, enc_n_points, add_channel_attention=add_channel_attention, use_deformable_box_attn=use_deformable_box_attn, box_attn_type=box_attn_type)
else:
raise NotImplementedError
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(
encoder_layer, num_encoder_layers,
encoder_norm, d_model=d_model,
num_queries=num_queries,
deformable_encoder=deformable_encoder,
enc_layer_share=enc_layer_share,
two_stage_type=two_stage_type
)
# choose decoder layer type
if deformable_decoder:
decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, dec_n_points, use_deformable_box_attn=use_deformable_box_attn, box_attn_type=box_attn_type,
key_aware_type=key_aware_type,
decoder_sa_type=decoder_sa_type,
module_seq=module_seq)
else:
raise NotImplementedError
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
return_intermediate=return_intermediate_dec,
d_model=d_model, query_dim=query_dim,
modulate_hw_attn=modulate_hw_attn,
num_feature_levels=num_feature_levels,
deformable_decoder=deformable_decoder,
decoder_query_perturber=decoder_query_perturber,
dec_layer_number=dec_layer_number, rm_dec_query_scale=rm_dec_query_scale,
dec_layer_share=dec_layer_share,
use_detached_boxes_dec_out=use_detached_boxes_dec_out
)
self.d_model = d_model
self.nhead = nhead
self.dec_layers = num_decoder_layers
self.num_queries = num_queries # useful for single stage model only
self.num_patterns = num_patterns
if not isinstance(num_patterns, int):
Warning("num_patterns should be int but {}".format(type(num_patterns)))
self.num_patterns = 0
if num_feature_levels > 1:
if self.num_encoder_layers > 0:
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
else:
self.level_embed = None
self.learnable_tgt_init = learnable_tgt_init
assert learnable_tgt_init, "why not learnable_tgt_init"
self.embed_init_tgt = embed_init_tgt
if (two_stage_type != 'no' and embed_init_tgt) or (two_stage_type == 'no'):
self.tgt_embed = nn.Embedding(self.num_queries, d_model)
nn.init.normal_(self.tgt_embed.weight.data)
else:
self.tgt_embed = None
# for two stage
self.two_stage_type = two_stage_type
self.two_stage_pat_embed = two_stage_pat_embed
self.two_stage_add_query_num = two_stage_add_query_num
self.two_stage_learn_wh = two_stage_learn_wh
assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type)
if two_stage_type =='standard':
# anchor selection at the output of encoder
self.enc_output = nn.Linear(d_model, d_model)
self.enc_output_norm = nn.LayerNorm(d_model)
if two_stage_pat_embed > 0:
self.pat_embed_for_2stage = nn.Parameter(torch.Tensor(two_stage_pat_embed, d_model))
nn.init.normal_(self.pat_embed_for_2stage)
if two_stage_add_query_num > 0:
self.tgt_embed = nn.Embedding(self.two_stage_add_query_num, d_model)
if two_stage_learn_wh:
self.two_stage_wh_embedding = nn.Embedding(1, 2)
else:
self.two_stage_wh_embedding = None
if two_stage_type == 'no':
self.init_ref_points(num_queries) # init self.refpoint_embed
self.enc_out_class_embed = None
self.enc_out_bbox_embed = None
# evolution of anchors
self.dec_layer_number = dec_layer_number
if dec_layer_number is not None:
if self.two_stage_type != 'no' or num_patterns == 0:
assert dec_layer_number[0] == num_queries, f"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries})"
else:
assert dec_layer_number[0] == num_queries * num_patterns, f"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries}) * num_patterns({num_patterns})"
self._reset_parameters()
self.rm_self_attn_layers = rm_self_attn_layers
if rm_self_attn_layers is not None:
print("Removing the self-attn in {} decoder layers".format(rm_self_attn_layers))
for lid, dec_layer in enumerate(self.decoder.layers):
if lid in rm_self_attn_layers:
dec_layer.rm_self_attn_modules()
| self.rm_detach = rm_detach |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ZhenboSong/RobustSIRR
# Path: models/base.py
def discriminator(k=4, fc=True):
return Discriminator(models.resnet18(pretrained=True, progress=False), k, fc=fc)
# Path: models/networks.py
class SCM(nn.Module):
def __init__(self, out_plane):
super(SCM, self).__init__()
self.main = nn.Sequential(
BasicConv(3, out_plane//4, kernel_size=3, stride=1, relu=True),
BasicConv(out_plane // 4, out_plane // 2, kernel_size=1, stride=1, relu=True),
BasicConv(out_plane // 2, out_plane // 2, kernel_size=3, stride=1, relu=True),
BasicConv(out_plane // 2, out_plane-3, kernel_size=1, stride=1, relu=True)
)
self.conv = BasicConv(out_plane, out_plane, kernel_size=1, stride=1, relu=False)
def forward(self, x):
x = torch.cat([x, self.main(x)], dim=1)
return self.conv(x)
# Path: models/networks.py
class DynamicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, K=4, init_weight=True, use_FC=False):
super(DynamicConv2d, self).__init__()
assert in_channels%groups==0
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.K = K
# False
self.use_FC=use_FC
self.weight = nn.Parameter(torch.randn(K, out_channels, in_channels//groups, kernel_size, kernel_size), requires_grad=True)
if bias:
self.bias = nn.Parameter(torch.Tensor(K, out_channels))
else:
self.bias = None
if use_FC:
self.fc=nn.Sequential(nn.Linear(512, K), nn.Softmax(dim=-1))
if init_weight:
self._initialize_weights()
def _initialize_weights(self):
for i in range(self.K):
nn.init.kaiming_uniform_(self.weight[i])
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i])
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias[i], -bound, bound)
def forward(self, x, softmax_attention):  # fold the batch into the group dimension and run one grouped convolution: group-conv weights differ per group, so each sample gets its own dynamic kernel
if self.use_FC:
softmax_attention=self.fc(softmax_attention)
batch_size, in_channels, height, width = x.size()
x = x.view(1, -1, height, width)  # merge the batch into a single dimension for the grouped convolution
weight = self.weight.view(self.K, -1)
# generate the dynamic convolution weights: batch_size sets of kernel parameters, each one different
aggregate_weight = torch.mm(softmax_attention, weight).view(-1, self.in_channels, self.kernel_size, self.kernel_size)
self.aggregate_weight = aggregate_weight
if self.bias is not None:
aggregate_bias = torch.mm(softmax_attention, self.bias).view(-1)
output = F.conv2d(x, weight=aggregate_weight, bias=aggregate_bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * batch_size)
else:
output = F.conv2d(x, weight=aggregate_weight, bias=None, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * batch_size)
output = output.view(batch_size, self.out_channels, output.size(-2), output.size(-1))
return output
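# --- Illustrative aside (standalone sketch with made-up sizes, not repository code):
# the core trick DynamicConv2d relies on is mixing K shared kernel banks with per-sample
# softmax weights, then applying all per-sample kernels in a single grouped convolution
# by folding the batch into the group dimension.
import torch
import torch.nn.functional as F

B, C_in, C_out, K, ksize = 2, 8, 16, 4, 3
x = torch.randn(B, C_in, 32, 32)
bank = torch.randn(K, C_out, C_in, ksize, ksize)          # K shared kernel sets
attn = torch.softmax(torch.randn(B, K), dim=-1)           # per-sample mixing weights

# same aggregation as torch.mm(softmax_attention, weight) in the class above
weight = torch.mm(attn, bank.view(K, -1)).view(B * C_out, C_in, ksize, ksize)

# fold batch into channels and run one convolution with groups=B
out = F.conv2d(x.view(1, B * C_in, 32, 32), weight, padding=1, groups=B)
out = out.view(B, C_out, 32, 32)
print(out.shape)  # torch.Size([2, 16, 32, 32])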
# Path: models/networks.py
class DynamicConvTranspose2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, K=4, init_weight=True, use_FC=False):
super(DynamicConvTranspose2d, self).__init__()
assert in_channels%groups==0
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.K = K
# False
self.use_FC=use_FC
self.weight = nn.Parameter(torch.randn(K, out_channels, in_channels//groups, kernel_size, kernel_size), requires_grad=True)
if bias:
self.bias = nn.Parameter(torch.Tensor(K, out_channels))
else:
self.bias = None
if use_FC:
self.fc=nn.Sequential(nn.Linear(512, K), nn.Softmax(dim=-1))
if init_weight:
self._initialize_weights()
def _initialize_weights(self):
for i in range(self.K):
nn.init.kaiming_uniform_(self.weight[i])
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i])
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias[i], -bound, bound)
def forward(self, x, softmax_attention):  # fold the batch into the group dimension and run one grouped transposed convolution: each sample gets its own dynamically mixed kernel
if self.use_FC:
softmax_attention=self.fc(softmax_attention)
batch_size, in_channels, height, width = x.size()
x = x.view(1, -1, height, width)  # merge the batch into a single dimension for the grouped convolution
weight = self.weight.view(self.K, -1)
# generate the dynamic convolution weights: batch_size sets of kernel parameters, each one different
aggregate_weight = torch.mm(softmax_attention, weight).view(-1, self.in_channels, self.kernel_size, self.kernel_size)
self.aggregate_weight = aggregate_weight
if self.bias is not None:
aggregate_bias = torch.mm(softmax_attention, self.bias).view(-1)
output = F.conv_transpose2d(x, weight=aggregate_weight, bias=aggregate_bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * batch_size)
else:
output = F.conv_transpose2d(x, weight=aggregate_weight, bias=None, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * batch_size)
output = output.view(batch_size, self.out_channels, output.size(-2), output.size(-1))
return output
# Path: models/networks.py
class AFF(nn.Module):
def __init__(self, in_channel, out_channel):
super(AFF, self).__init__()
self.conv = nn.Sequential(
BasicConv(in_channel, out_channel, kernel_size=1, stride=1, relu=True),
BasicConv(out_channel, out_channel, kernel_size=3, stride=1, relu=False)
)
def forward(self, x1, x2, x4):
x = torch.cat([x1, x2, x4], dim=1)
return self.conv(x)
# Path: models/networks.py
class BasicConv(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride, bias=True, norm=False, relu=True, transpose=False):
super(BasicConv, self).__init__()
if bias and norm:
bias = False
padding = kernel_size // 2
layers = list()
if transpose:
padding = kernel_size // 2 -1
layers.append(nn.ConvTranspose2d(in_channel, out_channel, kernel_size, padding=padding, stride=stride, bias=bias))
else:
layers.append(
nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding, stride=stride, bias=bias))
if norm:
layers.append(nn.BatchNorm2d(out_channel))
if relu:
layers.append(nn.ReLU(inplace=True))
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x)
# Path: models/networks.py
class TransformerBlock(nn.Module):
def __init__(self, dim, num_heads, ffn_expansion_factor, bias, LayerNorm_type):
super(TransformerBlock, self).__init__()
self.norm1 = LayerNorm(dim, LayerNorm_type)
self.attn = Attention(dim, num_heads, bias)
self.norm2 = LayerNorm(dim, LayerNorm_type)
self.ffn = FeedForward(dim, ffn_expansion_factor, bias)
def forward(self, x):
x = x + self.attn(self.norm1(x))
x = x + self.ffn(self.norm2(x))
return x
# Path: models/networks.py
class Downsample(nn.Module):
def __init__(self, n_feat):
super(Downsample, self).__init__()
self.body = nn.Sequential(nn.Conv2d(n_feat, n_feat//2, kernel_size=3, stride=1, padding=1, bias=False),
nn.PixelUnshuffle(2))
def forward(self, x):
return self.body(x)
# Path: models/networks.py
class Upsample(nn.Module):
def __init__(self, n_feat):
super(Upsample, self).__init__()
self.body = nn.Sequential(nn.Conv2d(n_feat, n_feat*2, kernel_size=3, stride=1, padding=1, bias=False),
nn.PixelShuffle(2))
def forward(self, x):
return self.body(x)
# Path: models/networks.py
class FAM(nn.Module):
def __init__(self, channel):
super(FAM, self).__init__()
self.merge = BasicConv(channel, channel, kernel_size=3, stride=1, relu=False)
def forward(self, x1, x2):
x = x1 * x2
out = x1 + self.merge(x)
return out
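# --- Illustrative aside (hypothetical stand-in, not repository code): FAM fuses two
# same-shaped feature maps by gating one with the other and adding a residual; a plain
# Conv2d stands in for BasicConv here.
import torch
import torch.nn as nn

class _DemoFAM(nn.Module):
    def __init__(self, channel):
        super().__init__()
        self.merge = nn.Conv2d(channel, channel, kernel_size=3, padding=1)

    def forward(self, x1, x2):
        return x1 + self.merge(x1 * x2)

_out = _DemoFAM(32)(torch.randn(1, 32, 64, 64), torch.randn(1, 32, 64, 64))
print(_out.shape)  # torch.Size([1, 32, 64, 64])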
# Path: models/arch/default.py
from operator import xor
from torch import nn
from models.base import discriminator
from models.networks import SCM, DynamicConv2d, DynamicConvTranspose2d, AFF, BasicConv, TransformerBlock, Downsample, Upsample, FAM
from copy import deepcopy
import torch
import torch.nn.functional as F
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
ssd_adv_pred = {}
class DRNet(torch.nn.Module):
def __init__(self, in_channels, out_channels, base_channels, n_resblocks, norm=nn.BatchNorm2d,
se_reduction=None, res_scale=1, bottom_kernel_size=3, pyramid=False):
super(DRNet, self).__init__()
# Initial convolution layers
conv = nn.Conv2d
deconv = nn.ConvTranspose2d
act = nn.ReLU(True)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.device1 = torch.device("cuda:0" if torch.cuda.device_count() > 1 else "cuda:0")
# attack train
self.disc = discriminator().to(self.device)
num_blocks = [1,2,2,4]
# num_blocks = [4,6,6,8]
num_refinement_blocks = 4
heads = [1,2,4,8]
ffn_expansion_factor = 2.66
bias = False
LayerNorm_type = 'WithBias' ## Other option 'BiasFree'
dual_pixel_task = False ## True for dual-pixel defocus deblurring only. Also set inp_channels=6
# self.conv1 = ConvLayer(conv, in_channels, n_feats, kernel_size=bottom_kernel_size, stride=1, norm=None, act=act)
# self.conv2 = ConvLayer(conv, n_feats, n_feats, kernel_size=3, stride=1, norm=norm, act=act)
# self.conv3 = ConvLayer(conv, n_feats, n_feats, kernel_size=3, stride=2, norm=norm, act=act)
self.feat_extract = nn.Sequential(
DynamicConvLayer(conv, in_channels, base_channels, kernel_size=bottom_kernel_size, stride=1, norm=None, act=act),
DynamicConvLayer(conv, base_channels, base_channels, kernel_size=3, stride=1, norm=norm, act=act),
DynamicConvLayer(conv, base_channels, base_channels, kernel_size=3, stride=1, norm=norm, act=act)
).to(self.device)
self.SCM1 = SCM(base_channels * 4).to(self.device)
self.SCM2 = SCM(base_channels * 2).to(self.device)
self.FAM1 = FAM(base_channels * 4).to(self.device)
self.FAM2 = FAM(base_channels * 2).to(self.device)
self.AFFs = nn.ModuleList([
AFF(base_channels * 7, base_channels * 1),
AFF(base_channels * 7, base_channels * 2)
]).to(self.device)
self.encoder_level1 = nn.Sequential(*[TransformerBlock(dim=base_channels, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])]).to(self.device)
self.down1_2 = Downsample(base_channels).to(self.device)
self.encoder_level2 = nn.Sequential(*[TransformerBlock(dim=int(base_channels*2**1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])]).to(self.device)
self.down2_3 = Downsample(int(base_channels*2**1)).to(self.device)
self.encoder_level3 = nn.Sequential(*[TransformerBlock(dim=int(base_channels*2**2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])]).to(self.device)
self.down3_4 = Downsample(int(base_channels*2**2)).to(self.device)
self.latent = nn.Sequential(*[TransformerBlock(dim=int(base_channels*2**3), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[3])]).to(self.device)
self.up4_3 = Upsample(int(base_channels*2**3)).to(self.device1)
self.reduce_chan_level3 = nn.Conv2d(int(base_channels*2**3), int(base_channels*2**2), kernel_size=1, bias=bias).to(self.device1)
self.decoder_level3 = nn.Sequential(*[TransformerBlock(dim=int(base_channels*2**2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])]).to(self.device1)
self.up3_2 = Upsample(int(base_channels*2**2)).to(self.device1)
self.reduce_chan_level2 = nn.Conv2d(int(base_channels*2**2), int(base_channels*2**1), kernel_size=1, bias=bias).to(self.device1)
self.decoder_level2 = nn.Sequential(*[TransformerBlock(dim=int(base_channels*2**1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])]).to(self.device1)
self.up2_1 = Upsample(int(base_channels*2**1)).to(self.device1)
self.decoder_level1 = nn.Sequential(*[TransformerBlock(dim=int(base_channels*2**1), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])]).to(self.device1)
self.spatial_feat_extract = nn.Sequential(
DynamicConvLayer(conv, base_channels * 2, base_channels, kernel_size=3, stride=1, norm=norm, act=act),
DynamicConvLayer(conv, base_channels, base_channels, kernel_size=3, stride=1, norm=norm, act=act),
PyramidPooling(base_channels, base_channels, scales=(4,8,16,32), ct_channels=base_channels//4),
DynamicConvLayer(conv, base_channels, out_channels, kernel_size=1, stride=1, norm=None, act=act)
).to(self.device1)
#input dynamic convolution weights to each DynamicConv2d layer via hook
for layer in self.modules():
if isinstance(layer, DynamicConv2d) or isinstance(layer, DynamicConvTranspose2d):
layer.register_forward_pre_hook(lambda module, x:(x[0], ssd_adv_pred[x[0].device]))
@property
def adv_pred(self):
return ssd_adv_pred[self.conv1.device]
def forward(self, x, adv_pred):
# adv_pred=adv_pred if adv_pred is not None else self.disc(x)
ssd_adv_pred[adv_pred.device] = adv_pred  # AID prediction used to drive the dynamic convolution weights
adv_pred1 = adv_pred.to(self.device1)
ssd_adv_pred[adv_pred1.device] = adv_pred1
x_2 = F.interpolate(x, scale_factor=0.5)
# print('x_2.shape',x_2.shape)
x_4 = F.interpolate(x_2, scale_factor=0.5)
z2 = self.SCM2(x_2)
z4 = self.SCM1(x_4)
# encoder
inp_enc_level1 = self.feat_extract(x)
# print('inp_enc_level1.shape',inp_enc_level1.shape)
out_enc_level1 = self.encoder_level1(inp_enc_level1)
# print('out_enc_level1.shape',out_enc_level1.shape)
inp_fam2 = self.down1_2(out_enc_level1)
# print('inpfam2.shape',inp_fam2.shape)
# print('z2.shape',z2.shape)
inp_enc_level2 = self.FAM2(inp_fam2, z2)
out_enc_level2 = self.encoder_level2(inp_enc_level2)
inp_fam1 = self.down2_3(out_enc_level2)
inp_enc_level3 = self.FAM1(inp_fam1, z4)
| out_enc_level3 = self.encoder_level3(inp_enc_level3) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: girgle/DouZero_For_New_HLDDZ
# Path: douzero/env/move_generator.py
class MovesGener(object):
"""
This is for generating the possible combinations
"""
def __init__(self, cards_list):
self.cards_list = cards_list
self.cards_dict = collections.defaultdict(int)
for i in self.cards_list:
self.cards_dict[i] += 1
self.single_card_moves = []
self.gen_type_1_single()
self.pair_moves = []
self.gen_type_2_pair()
self.triple_cards_moves = []
self.gen_type_3_triple()
self.bomb_moves = []
self.gen_type_4_bomb()
self.final_bomb_moves = []
self.gen_type_5_king_bomb()
def _gen_serial_moves(self, cards, min_serial, repeat=1, repeat_num=0):
if repeat_num < min_serial: # at least repeat_num is min_serial
repeat_num = 0
single_cards = sorted(list(set(cards)))
seq_records = list()
moves = list()
start = i = 0
longest = 1
while i < len(single_cards):
if i + 1 < len(single_cards) and single_cards[i + 1] - single_cards[i] == 1:
longest += 1
i += 1
else:
seq_records.append((start, longest))
i += 1
start = i
longest = 1
for seq in seq_records:
if seq[1] < min_serial:
continue
start, longest = seq[0], seq[1]
longest_list = single_cards[start: start + longest]
if repeat_num == 0: # No limitation on how many sequences
steps = min_serial
while steps <= longest:
index = 0
while steps + index <= longest:
target_moves = sorted(longest_list[index: index + steps] * repeat)
moves.append(target_moves)
index += 1
steps += 1
else: # repeat_num > 0
if longest < repeat_num:
continue
index = 0
while index + repeat_num <= longest:
target_moves = sorted(longest_list[index: index + repeat_num] * repeat)
moves.append(target_moves)
index += 1
return moves
def gen_type_1_single(self):
self.single_card_moves = []
for i in set(self.cards_list):
self.single_card_moves.append([i])
return self.single_card_moves
def gen_type_2_pair(self):
self.pair_moves = []
for k, v in self.cards_dict.items():
if v >= 2:
self.pair_moves.append([k, k])
return self.pair_moves
def gen_type_3_triple(self):
self.triple_cards_moves = []
for k, v in self.cards_dict.items():
if v >= 3:
self.triple_cards_moves.append([k, k, k])
return self.triple_cards_moves
def gen_type_4_bomb(self):
self.bomb_moves = []
for k, v in self.cards_dict.items():
if v == 4:
self.bomb_moves.append([k, k, k, k])
return self.bomb_moves
def gen_type_5_king_bomb(self):
self.final_bomb_moves = []
if 20 in self.cards_list and 30 in self.cards_list:
self.final_bomb_moves.append([20, 30])
return self.final_bomb_moves
def gen_type_6_3_1(self):
result = []
for t in self.single_card_moves:
for i in self.triple_cards_moves:
if t[0] != i[0]:
result.append(t+i)
return result
def gen_type_7_3_2(self):
result = list()
for t in self.pair_moves:
for i in self.triple_cards_moves:
if t[0] != i[0]:
result.append(t+i)
return result
def gen_type_8_serial_single(self, repeat_num=0):
return self._gen_serial_moves(self.cards_list, MIN_SINGLE_CARDS, repeat=1, repeat_num=repeat_num)
def gen_type_9_serial_pair(self, repeat_num=0):
single_pairs = list()
for k, v in self.cards_dict.items():
if v >= 2:
single_pairs.append(k)
return self._gen_serial_moves(single_pairs, MIN_PAIRS, repeat=2, repeat_num=repeat_num)
def gen_type_10_serial_triple(self, repeat_num=0):
single_triples = list()
for k, v in self.cards_dict.items():
if v >= 3:
single_triples.append(k)
return self._gen_serial_moves(single_triples, MIN_TRIPLES, repeat=3, repeat_num=repeat_num)
def gen_type_11_serial_3_1(self, repeat_num=0):
serial_3_moves = self.gen_type_10_serial_triple(repeat_num=repeat_num)
serial_3_1_moves = list()
for s3 in serial_3_moves: # s3 is like [3,3,3,4,4,4]
s3_set = set(s3)
new_cards = [i for i in self.cards_list if i not in s3_set]
# Get any s3_len items from cards
subcards = select(new_cards, len(s3_set))
for i in subcards:
serial_3_1_moves.append(s3 + i)
return list(k for k, _ in itertools.groupby(serial_3_1_moves))
def gen_type_12_serial_3_2(self, repeat_num=0):
serial_3_moves = self.gen_type_10_serial_triple(repeat_num=repeat_num)
serial_3_2_moves = list()
pair_set = sorted([k for k, v in self.cards_dict.items() if v >= 2])
for s3 in serial_3_moves:
s3_set = set(s3)
pair_candidates = [i for i in pair_set if i not in s3_set]
# Get any s3_len items from cards
subcards = select(pair_candidates, len(s3_set))
for i in subcards:
serial_3_2_moves.append(sorted(s3 + i * 2))
return serial_3_2_moves
def gen_type_13_4_2(self):
four_cards = list()
for k, v in self.cards_dict.items():
if v == 4:
four_cards.append(k)
result = list()
for fc in four_cards:
cards_list = [k for k in self.cards_list if k != fc]
subcards = select(cards_list, 2)
for i in subcards:
result.append([fc]*4 + i)
return list(k for k, _ in itertools.groupby(result))
def gen_type_14_4_22(self):
four_cards = list()
for k, v in self.cards_dict.items():
if v == 4:
four_cards.append(k)
result = list()
for fc in four_cards:
cards_list = [k for k, v in self.cards_dict.items() if k != fc and v>=2]
subcards = select(cards_list, 2)
for i in subcards:
result.append([fc] * 4 + [i[0], i[0], i[1], i[1]])
return result
# generate all possible moves from given cards
def gen_moves(self):
moves = []
moves.extend(self.gen_type_1_single())
moves.extend(self.gen_type_2_pair())
moves.extend(self.gen_type_3_triple())
moves.extend(self.gen_type_4_bomb())
moves.extend(self.gen_type_5_king_bomb())
moves.extend(self.gen_type_6_3_1())
moves.extend(self.gen_type_7_3_2())
moves.extend(self.gen_type_8_serial_single())
moves.extend(self.gen_type_9_serial_pair())
moves.extend(self.gen_type_10_serial_triple())
moves.extend(self.gen_type_11_serial_3_1())
moves.extend(self.gen_type_12_serial_3_2())
moves.extend(self.gen_type_13_4_2())
moves.extend(self.gen_type_14_4_22())
return moves
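# --- Illustrative aside (standalone sketch with a made-up hand encoding, not repository
# code): the simplest move types are enumerated from a card-count dictionary, mirroring
# gen_type_1_single, gen_type_2_pair, gen_type_4_bomb and gen_type_5_king_bomb.
import collections

_hand = [3, 3, 4, 4, 4, 4, 17, 20, 30]            # 20 / 30 stand for the two jokers
_counts = collections.Counter(_hand)

_singles = [[c] for c in _counts]
_pairs = [[c, c] for c, n in _counts.items() if n >= 2]
_bombs = [[c] * 4 for c, n in _counts.items() if n == 4]
_king_bomb = [[20, 30]] if 20 in _counts and 30 in _counts else []

print(_pairs)      # [[3, 3], [4, 4]]
print(_bombs)      # [[4, 4, 4, 4]]
print(_king_bomb)  # [[20, 30]]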
# Path: douzero/env/move_detector.py
def get_move_type(move):
move_size = len(move)
move_dict = collections.Counter(move)
if move_size == 0:
return {'type': TYPE_0_PASS}
if move_size == 1:
return {'type': TYPE_1_SINGLE, 'rank': move[0]}
if move_size == 2:
if move[0] == move[1]:
return {'type': TYPE_2_PAIR, 'rank': move[0]}
elif move == [20, 30]: # Kings
return {'type': TYPE_5_KING_BOMB}
else:
return {'type': TYPE_15_WRONG}
if move_size == 3:
if len(move_dict) == 1:
return {'type': TYPE_3_TRIPLE, 'rank': move[0]}
else:
return {'type': TYPE_15_WRONG}
if move_size == 4:
if len(move_dict) == 1:
return {'type': TYPE_4_BOMB, 'rank': move[0]}
elif len(move_dict) == 2:
if move[0] == move[1] == move[2] or move[1] == move[2] == move[3]:
return {'type': TYPE_6_3_1, 'rank': move[1]}
else:
return {'type': TYPE_15_WRONG}
else:
return {'type': TYPE_15_WRONG}
if is_continuous_seq(move):
return {'type': TYPE_8_SERIAL_SINGLE, 'rank': move[0], 'len': len(move)}
if move_size == 5:
if len(move_dict) == 2:
return {'type': TYPE_7_3_2, 'rank': move[2]}
else:
return {'type': TYPE_15_WRONG}
count_dict = collections.defaultdict(int)
for c, n in move_dict.items():
count_dict[n] += 1
if move_size == 6:
if (len(move_dict) == 2 or len(move_dict) == 3) and count_dict.get(4) == 1 and \
(count_dict.get(2) == 1 or count_dict.get(1) == 2):
return {'type': TYPE_13_4_2, 'rank': move[2]}
if move_size == 8 and (((len(move_dict) == 3 or len(move_dict) == 2) and
(count_dict.get(4) == 1 and count_dict.get(2) == 2)) or count_dict.get(4) == 2):
return {'type': TYPE_14_4_22, 'rank': max([c for c, n in move_dict.items() if n == 4])}
mdkeys = sorted(move_dict.keys())
if len(move_dict) == count_dict.get(2) and is_continuous_seq(mdkeys):
return {'type': TYPE_9_SERIAL_PAIR, 'rank': mdkeys[0], 'len': len(mdkeys)}
if len(move_dict) == count_dict.get(3) and is_continuous_seq(mdkeys):
return {'type': TYPE_10_SERIAL_TRIPLE, 'rank': mdkeys[0], 'len': len(mdkeys)}
# Check Type 11 (serial 3+1) and Type 12 (serial 3+2)
if count_dict.get(3, 0) >= MIN_TRIPLES:
serial_3 = list()
single = list()
pair = list()
for k, v in move_dict.items():
if v == 3:
serial_3.append(k)
elif v == 1:
single.append(k)
elif v == 2:
pair.append(k)
else: # no other possibilities
return {'type': TYPE_15_WRONG}
serial_3.sort()
if is_continuous_seq(serial_3):
if len(serial_3) == len(single)+len(pair)*2:
return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3)}
if len(serial_3) == len(pair) and len(move_dict) == len(serial_3) * 2:
return {'type': TYPE_12_SERIAL_3_2, 'rank': serial_3[0], 'len': len(serial_3)}
if len(serial_3) == 4:
if is_continuous_seq(serial_3[1:]):
return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[1], 'len': len(serial_3) - 1}
if is_continuous_seq(serial_3[:-1]):
return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3) - 1}
return {'type': TYPE_15_WRONG}
# Path: douzero/env/move_selector.py
def common_handle(moves, rival_move):
def filter_type_1_single(moves, rival_move):
def filter_type_2_pair(moves, rival_move):
def filter_type_3_triple(moves, rival_move):
def filter_type_4_bomb(moves, rival_move):
def filter_type_6_3_1(moves, rival_move):
def filter_type_7_3_2(moves, rival_move):
def filter_type_8_serial_single(moves, rival_move):
def filter_type_9_serial_pair(moves, rival_move):
def filter_type_10_serial_triple(moves, rival_move):
def filter_type_11_serial_3_1(moves, rival_move):
def filter_type_12_serial_3_2(moves, rival_move):
def filter_type_13_4_2(moves, rival_move):
def filter_type_14_4_22(moves, rival_move):
# Path: search_utility.py
import time
from douzero.env.move_generator import MovesGener
from douzero.env.move_detector import get_move_type
from douzero.env import move_selector
return False
for item in mlist:
if not isinstance(item, type):
return False
return True
def action_in_tree(path_list, action):
for ac in path_list:
ac[0].sort()
if action == ac[0]:
return ac
return None
def search_actions(my_cards, other_cards, path_list, rival_move=None, prev_moves=None):
if len(path_list) > 100:
return None
if prev_moves is None:
my_cards.sort()
other_cards.sort()
my_gener = MovesGener(my_cards)
other_gener = MovesGener(other_cards)
other_bombs = other_gener.gen_type_4_bomb()
other_bombs.extend(other_gener.gen_type_5_king_bomb())
my_bombs = my_gener.gen_type_4_bomb()
my_bombs.extend(my_gener.gen_type_5_king_bomb())
legal_move_tree = []
rival_move_info = {}
type_range = [4, 5, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
if rival_move is not None:
if len(rival_move) > 0:
rival_move_info = get_move_type(rival_move)
type_range = [4, 5, rival_move_info["type"]]
else:
rival_move = None
for mtype in type_range:
my_moves = my_gener.gen_moves_by_type(mtype)
if len(my_moves) == 0:
continue
if mtype == 4:
other_moves = other_bombs
else:
other_moves = other_gener.gen_moves_by_type(mtype)
for move in my_moves:
if len(move) != len(my_cards):
if mtype != 4 and mtype != 5 and len(other_bombs) > 0:
break
if len(move_selector.filter_type_n(mtype, other_moves, move)) == 0:
if rival_move is not None:
move_info = get_move_type(move)
if "rank" in move_info and "rank" in rival_move_info and move_info["rank"] <= rival_move_info["rank"]:
continue
if "len" in move_info and move_info["len"] != rival_move_info["len"]:
continue
if rival_move_info["type"] == 5:
continue
new_cards = my_cards.copy()
for card in move:
new_cards.remove(card)
if prev_moves is not None:
new_prev = prev_moves.copy()
new_prev.append(move)
else:
new_prev = [move]
actions = search_actions(new_cards, other_cards, path_list, prev_moves=new_prev)
del new_prev
del new_cards
if actions is not None and len(actions) > 0:
legal_move_tree.append([move, actions])
else:
if rival_move is not None:
move_info = get_move_type(move)
if "rank" in move_info and "rank" in rival_move_info and move_info["rank"] <= rival_move_info["rank"]:
continue
if "len" in move_info and move_info["len"] != rival_move_info["len"]:
continue
if rival_move_info["type"] == 5:
continue
legal_move_tree.append(move)
if prev_moves is not None:
new_path = prev_moves.copy()
new_path.append(move)
path_list.append(new_path)
else:
path_list.append([move])
legal_moves_count = len(legal_move_tree)
del my_gener, other_gener, my_bombs, other_bombs, my_cards, other_cards, legal_move_tree
return None
# if legal_moves_count == 0:
# return None
# if legal_moves_count == 1:
# return legal_move_tree[0]
# return legal_move_tree
def eval_path(path):
bomb = 0
for action in path:
if 30 in action and 20 in action or len(action) == 4 and len(set(action)) == 1:
bomb += 1
return 1 + bomb - len(path) * 0.05
def select_optimal_path(path_list):
if len(path_list) != 0:
max_path = max(path_list, key=lambda x: eval_path(x))
for action in max_path:
action.sort()
return max_path
else:
return None
def check_42(path):
for action in path:
move_type = get_move_type(action)
if move_type["type"] == 13 or move_type["type"] == 14:
| return True |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yongzhuo/ChatGLM3-SFT
# Path: chatglm3_sft/ft_chatglm3/config.py
CUDA_VISIBLE_DEVICES = "0"
# Path: chatglm3_sft/ft_chatglm3/config.py
USE_TORCH = "1"
# Path: chatglm3_sft/ft_chatglm3/config.py
CPU_NUMS = "9"
# Path: chatglm3_sft/models/modeling_chatglm.py
_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM"
_CONFIG_FOR_DOC = "ChatGLMConfig"
CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
"THUDM/chatglm3-6b",
# See all ChatGLM models at https://huggingface.co/models?filter=chatglm
]
def default_init(cls, *args, **kwargs):
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
def __init__(self, config: ChatGLMConfig):
def forward(self, prefix: torch.Tensor):
def split_tensor_along_last_dim(
tensor: torch.Tensor,
num_partitions: int,
contiguous_split_chunks: bool = False,
) -> List[torch.Tensor]:
def __init__(self, dim, original_impl=False, device=None, dtype=None):
def forward_impl(
self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000
):
def forward(self, max_seq_len, offset=0):
def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
def forward(self, hidden_states: torch.Tensor):
def __init__(self, config: ChatGLMConfig, layer_number):
def forward(self, query_layer, key_layer, value_layer, attention_mask):
def __init__(self, config: ChatGLMConfig, layer_number, device=None):
def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):
def forward(
self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
):
def _config_to_kwargs(args):
def __init__(self, config: ChatGLMConfig, device=None):
def swiglu(x):
def forward(self, hidden_states):
def __init__(self, config: ChatGLMConfig, layer_number, device=None):
def forward(
self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,
):
def __init__(self, config: ChatGLMConfig, device=None):
def build_layer(layer_number):
def _get_layer(self, layer_number):
def forward(
self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,
use_cache: Optional[bool] = True,
output_hidden_states: Optional[bool] = False,
):
def _init_weights(self, module):
def get_masks(self, input_ids, past_key_values, padding_mask=None):
def get_position_ids(self, input_ids, device):
def _set_gradient_checkpointing(self, module, value=False):
def __init__(self, config: ChatGLMConfig, device=None):
def forward(self, input_ids):
def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):
def get_input_embeddings(self):
def set_input_embeddings(self, new_embeddings: torch.Tensor):
def get_prompt(self, batch_size, device, dtype=torch.half):
def forward(
self,
input_ids,
position_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.BoolTensor] = None,
full_attention_mask: Optional[torch.BoolTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
def quantize(self, weight_bit_width: int):
def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
def _update_model_kwargs_for_generation(
self,
outputs: ModelOutput,
model_kwargs: Dict[str, Any],
is_encoder_decoder: bool = False,
standardize_cache_format: bool = False,
) -> Dict[str, Any]:
def prepare_inputs_for_generation(
self,
input_ids: torch.LongTensor,
past_key_values: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
is_first_forward: bool = True,
**kwargs
) -> dict:
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
full_attention_mask: Optional[torch.BoolTensor] = None,
past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
return_last_logit: Optional[bool] = False,
):
def _reorder_cache(
past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
def process_response(self, output, history):
def tool_call(**kwargs):
def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, role: str = "user",
max_length: int = 8192, num_beams=1, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,
**kwargs):
def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, role: str = "user",
past_key_values=None,max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8,
logits_processor=None, return_past_key_values=False, **kwargs):
def stream_generate(
self,
input_ids,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
return_past_key_values=False,
**kwargs,
):
def quantize(self, bits: int, empty_init=False, device=None, **kwargs):
def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
full_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]:
class InvalidScoreLogitsProcessor(LogitsProcessor):
class PrefixEncoder(torch.nn.Module):
class RotaryEmbedding(nn.Module):
class RMSNorm(torch.nn.Module):
class CoreAttention(torch.nn.Module):
class SelfAttention(torch.nn.Module):
class MLP(torch.nn.Module):
class GLMBlock(torch.nn.Module):
class GLMTransformer(torch.nn.Module):
class ChatGLMPreTrainedModel(PreTrainedModel):
class Embedding(torch.nn.Module):
class ChatGLMModel(ChatGLMPreTrainedModel):
class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
class ChatGLMForSequenceClassification(ChatGLMPreTrainedModel):
# Path: chatglm3_sft/models/tokenization_chatglm.py
class ChatGLMTokenizer(PreTrainedTokenizer):
vocab_files_names = {"vocab_file": "tokenizer.model"}
model_input_names = ["input_ids", "attention_mask", "position_ids"]
def __init__(self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, **kwargs):
self.name = "GLMTokenizer"
self.vocab_file = vocab_file
self.tokenizer = SPTokenizer(vocab_file)
self.special_tokens = {
"<bos>": self.tokenizer.bos_id,
"<eos>": self.tokenizer.eos_id,
"<pad>": self.tokenizer.pad_id
}
super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
def get_command(self, token):
if token in self.special_tokens:
return self.special_tokens[token]
assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}"
return self.tokenizer.special_tokens[token]
@property
def unk_token(self) -> str:
return "<unk>"
@property
def pad_token(self) -> str:
return "<unk>"
@property
def pad_token_id(self):
return self.get_command("<pad>")
@property
def eos_token(self) -> str:
return "</s>"
@property
def eos_token_id(self):
return self.get_command("<eos>")
@property
def vocab_size(self):
return self.tokenizer.n_words
def get_vocab(self):
""" Returns vocab as a dict """
vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text, **kwargs):
return self.tokenizer.tokenize(text)
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.tokenizer.convert_token_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.tokenizer.convert_id_to_token(index)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
return self.tokenizer.decode_tokens(tokens)
def save_vocabulary(self, save_directory, filename_prefix=None):
"""
Save the vocabulary and special tokens file to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
An optional prefix to add to the named of the saved files.
Returns:
`Tuple(str)`: Paths to the files saved.
"""
if os.path.isdir(save_directory):
vocab_file = os.path.join(
save_directory, self.vocab_files_names["vocab_file"]
)
else:
vocab_file = save_directory
with open(self.vocab_file, 'rb') as fin:
proto_str = fin.read()
with open(vocab_file, "wb") as writer:
writer.write(proto_str)
return (vocab_file,)
def get_prefix_tokens(self):
prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")]
return prefix_tokens
def build_single_message(self, role, metadata, message):
assert role in ["system", "user", "assistant", "observation"], role
role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n")
message_tokens = self.tokenizer.encode(message)
tokens = role_tokens + message_tokens
return tokens
def build_chat_input(self, query, history=None, role="user"):
if history is None:
history = []
input_ids = []
for item in history:
content = item["content"]
if item["role"] == "system" and "tools" in item:
content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False)
input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content))
input_ids.extend(self.build_single_message(role, "", query))
input_ids.extend([self.get_command("<|assistant|>")])
return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
prefix_tokens = self.get_prefix_tokens()
token_ids_0 = prefix_tokens + token_ids_0
if token_ids_1 is not None:
token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("<eos>")]
return token_ids_0
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
# assert self.padding_side == "left"
required_input = encoded_inputs[self.model_input_names[0]]
seq_length = len(required_input)
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * seq_length
if "position_ids" not in encoded_inputs:
encoded_inputs["position_ids"] = list(range(seq_length))
if needs_to_be_padded:
difference = max_length - len(required_input)
if "attention_mask" in encoded_inputs:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "position_ids" in encoded_inputs:
encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
return encoded_inputs
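# --- Illustrative aside (hypothetical ids, framework-free, not repository code): the
# left-padding performed by _pad above prepends zeros to the attention mask and position
# ids and pad tokens to the input ids.
_pad_token_id = 0                     # made-up pad id, not ChatGLM's real one
_input_ids = [64790, 64792, 30910]    # made-up token ids
_max_length = 6

_attention_mask = [1] * len(_input_ids)
_position_ids = list(range(len(_input_ids)))

_difference = _max_length - len(_input_ids)
_attention_mask = [0] * _difference + _attention_mask
_position_ids = [0] * _difference + _position_ids
_input_ids = [_pad_token_id] * _difference + _input_ids

print(_input_ids)       # [0, 0, 0, 64790, 64792, 30910]
print(_attention_mask)  # [0, 0, 0, 1, 1, 1]
print(_position_ids)    # [0, 0, 0, 0, 1, 2]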
# Path: chatglm3_sft/ft_chatglm3/config.py
PATH_MODEL_PRETRAIN = ""
# Path: chatglm3_sft/ft_chatglm3/config.py
DATA_PATH = "../dataset/alpaca_gpt4_data_zh.json"
# Path: chatglm3_sft/ft_chatglm3/config.py
MODEL_SAVE_DIR = "model_chatglm3_sft"
# Path: chatglm3_sft/ft_chatglm3/config.py
REPO_ID = "THUDM/chatglm3-6b"
# Path: chatglm3_sft/ft_chatglm3/config.py
MICRO_BATCH_SIZE = 4 # default=4 # this could actually be 5 but i like powers of 2
# Path: chatglm3_sft/ft_chatglm3/config.py
BATCH_SIZE = 128
# Path: chatglm3_sft/ft_chatglm3/config.py
GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE
# Path: chatglm3_sft/ft_chatglm3/config.py
LEARNING_RATE = 3e-4 # default=3e-4 # the Karpathy constant
# Path: chatglm3_sft/ft_chatglm3/config.py
EPOCHS = 3 # default=3 # we don't always need 3 tbh
# Path: chatglm3_sft/ft_chatglm3/config.py
SAVE_STEPS = 382
# Path: chatglm3_sft/ft_chatglm3/config.py
VAL_SET_SIZE = 0
# Path: chatglm3_sft/ft_chatglm3/config.py
TARGET_MODULES = ["query_key_value"]
# Path: chatglm3_sft/ft_chatglm3/config.py
IS_PARALLELIZABLE = False
# Path: chatglm3_sft/ft_chatglm3/config.py
MODEL_PARALLEL = False
# Path: chatglm3_sft/ft_chatglm3/config.py
USE_CACHE = False
# Path: chatglm3_sft/ft_chatglm3/config.py
MAX_LENGTH_Q = 256 - 2 # default=128 - 2 # 512 - 2
# Path: chatglm3_sft/ft_chatglm3/config.py
MAX_LENGTH_A = 256 - 2 # default=128 - 2 # 512 - 2
# Path: chatglm3_sft/ft_chatglm3/config.py
MAX_LENGTH_QA = MAX_LENGTH_Q + MAX_LENGTH_A + 4
# Path: chatglm3_sft/ft_chatglm3/config.py
LORA_DROPOUT = 0.05
# Path: chatglm3_sft/ft_chatglm3/config.py
LORA_ALPHA = 16
# Path: chatglm3_sft/ft_chatglm3/config.py
LORA_R = 8
# Path: chatglm3_sft/ft_chatglm3/train.py
import random
import copy
import sys
import os
import torch.nn as nn
import transformers
import torch
from chatglm3_sft.ft_chatglm3.config import CUDA_VISIBLE_DEVICES, USE_TORCH, CPU_NUMS # from config
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from peft import (get_peft_model_state_dict, get_peft_model, LoraConfig)
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.modeling_utils import unwrap_model
from tensorboardX import SummaryWriter
from datasets import load_dataset
from chatglm3_sft.models.modeling_chatglm import ChatGLMForConditionalGeneration, ChatGLMConfig
from chatglm3_sft.models.tokenization_chatglm import ChatGLMTokenizer
from chatglm3_sft.ft_chatglm3.config import PATH_MODEL_PRETRAIN, DATA_PATH, MODEL_SAVE_DIR, REPO_ID
from chatglm3_sft.ft_chatglm3.config import MICRO_BATCH_SIZE, BATCH_SIZE, GRADIENT_ACCUMULATION_STEPS
from chatglm3_sft.ft_chatglm3.config import LEARNING_RATE, EPOCHS, SAVE_STEPS, VAL_SET_SIZE, TARGET_MODULES
from chatglm3_sft.ft_chatglm3.config import IS_PARALLELIZABLE, MODEL_PARALLEL, USE_CACHE
from chatglm3_sft.ft_chatglm3.config import MAX_LENGTH_Q, MAX_LENGTH_A, MAX_LENGTH_QA
from chatglm3_sft.ft_chatglm3.config import LORA_DROPOUT, LORA_ALPHA, LORA_R
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2023/3/5 21:04
# @author : Mo
# @function: chatglm3-sft
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
print(path_root)
sys.path.append(path_root)
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:3072"
os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES
os.environ["USE_TORCH"] = USE_TORCH
os.environ["OMP_NUM_THREADS"] = CPU_NUMS # export OMP_NUM_THREADS=1
os.environ["OPENBLAS_NUM_THREADS"] = CPU_NUMS # export OPENBLAS_NUM_THREADS=1
os.environ["MKL_NUM_THREADS"] = CPU_NUMS # export MKL_NUM_THREADS=1
os.environ["VECLIB_MAXIMUM_THREADS"] = CPU_NUMS # export VECLIB_MAXIMUM_THREADS=1
os.environ["NUMEXPR_NUM_THREADS"] = CPU_NUMS # export NUMEXPR_NUM_THREADS=1
# import bitsandbytes as bnb
def save_model_state(model, config=None, model_save_dir="./", model_name="adapter_model.bin"):
""" 仅保存 有梯度 的 模型参数(推荐使用) """
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
# save config
if config:
config.save_pretrained(model_save_dir)
# config.to_dict()
# save model
path_model = os.path.join(model_save_dir, model_name)
grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters()
if v.requires_grad == True}
torch.save(grad_params_dict, path_model)
print("******model_save_path is {}******".format(path_model))
def print_named_parameters(model, use_print_data=False):
""" 打印模型训练参数/数据类型信息 """
trainable_params = 0
all_param = 0
for name, param in model.named_parameters():
if use_print_data:
print((name, param.data.dtype, param.requires_grad, param.data))
else:
print((name, param.data.dtype, param.requires_grad))
num_params = param.numel()
# if using DS Zero 3 and the weights are initialized empty
if num_params == 0 and hasattr(param, "ds_numel"):
num_params = param.ds_numel
all_param += num_params
if param.requires_grad:
trainable_params += num_params
print(f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}")
def prepare_model_for_half_training(model, output_embedding_layer_name="lm_head",
use_gradient_checkpointing=True, layer_norm_names=["layer_norm"]):
r"""
| This method wrapps the entire protocol for preparing a model before running a training. This includes: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: forefy/eburger
# Path: eburger/utils/cli_args.py
# Path: eburger/utils/filesystem.py
def create_directory_if_not_exists(directory_path):
# Check if the directory already exists
if not os.path.exists(directory_path):
# Create the directory
os.makedirs(directory_path)
log("info", f"Created directory: {directory_path}")
# Path: eburger/utils/filesystem.py
def create_or_empty_directory(directory_path):
# Check if the directory already exists
if os.path.exists(directory_path):
# Empty the directory by removing all its contents
shutil.rmtree(directory_path)
os.makedirs(directory_path)
log("info", f"Emptied and re-created directory: {directory_path}")
else:
# Create the directory if it does not exist
os.makedirs(directory_path)
log("info", f"Created directory: {directory_path}")
# Path: eburger/utils/filesystem.py
def find_and_read_sol_file(folder_path):
# Search for .sol files recursively in the given folder
excluded_folders = settings.excluded_dirs + ["mocks", "lib"]
for root, dirs, files in os.walk(folder_path):
for file in files:
sol_file_path = os.path.join(root, file)
if any(
fnmatch.fnmatch(sol_file_path, f"*/{pattern}/*")
for pattern in excluded_folders
):
continue
if (
file.endswith(".sol")
and not file.endswith(".t.sol")
and not file.endswith(".s.sol")
):
with open(sol_file_path, "r") as input_file:
head = [line for line, _ in zip(input_file, range(10))]
if any("pragma" in line for line in head):
return sol_file_path
log("error", "Can't parse path given in argument.")
# Path: eburger/utils/filesystem.py
def get_foundry_ast_json(forge_out_dir) -> dict:
json_files = [f for f in os.listdir(forge_out_dir) if f.endswith(".json")]
if not json_files:
log("error", "forge build generated no output.")
if len(json_files) > 1:
log(
"warning",
"Multiple forge-output files found, choosing the latest.",
)
latest_file = max(
json_files,
key=lambda x: os.path.getctime(os.path.join(forge_out_dir, x)),
)
latest_file_path = os.path.join(forge_out_dir, latest_file)
with open(latest_file_path, "r") as f:
ast_json = json.load(f)
return ast_json["output"]
# Path: eburger/utils/filesystem.py
def get_hardhat_ast_json(hardhat_out_dir) -> dict:
json_files = [f for f in os.listdir(hardhat_out_dir) if f.endswith(".json")]
if not json_files:
log("error", "npx hardhat compile generated no output.")
if len(json_files) > 1:
log(
"warning",
"Multiple hardhat output files found, choosing the latest.",
)
latest_file = max(
json_files,
key=lambda x: os.path.getctime(os.path.join(hardhat_out_dir, x)),
)
latest_file_path = os.path.join(hardhat_out_dir, latest_file)
with open(latest_file_path, "r") as f:
ast_json = json.load(f)
return ast_json["output"]
# Path: eburger/utils/filesystem.py
def get_solidity_version_from_file(solidity_file_or_folder: str) -> str:
solc_required_version = None
version_pattern = r"pragma\s+solidity\s+([^;]+);"
version_number_pattern = r"\d+\.\d+\.\d+|\d+\.\d+"
with open(solidity_file_or_folder, "r") as f:
content = f.read()
match = re.search(version_pattern, content)
if match:
version_match = match.group(1)
# Extract all version numbers
version_numbers = re.findall(version_number_pattern, version_match)
if version_numbers:
# If there is an upper limit, choose the version just below the upper limit
if "<" in version_match and len(version_numbers) > 1:
upper_version = version_numbers[-1]
lower_version = (
version_numbers[0] if len(version_numbers) > 1 else None
)
major, minor, *patch = map(int, upper_version.split("."))
lower_major, lower_minor, *lower_patch = (
map(int, lower_version.split(".")) if lower_version else (0, 0)
)
if minor > 0 and (
major > lower_major
or (major == lower_major and minor - 1 >= lower_minor)
):
# Choose the highest minor version below the upper limit
solc_required_version = f"{major}.{minor - 1}.0"
else:
# If the upper limit is too close to the lower limit, return the lower limit
solc_required_version = lower_version
else:
# Return the highest version number found
solc_required_version = version_numbers[-1]
if solc_required_version is None:
log("warning", "Couldn't extract solidity version from file, trying 0.8.20")
solc_required_version = "0.8.20"
return solc_required_version
# Path: eburger/utils/helpers.py
def construct_solc_cmdline(path_type: str, compilation_source_path: str) -> str:
solc_cmdline = "solc"
if args.solc_remappings:
solc_cmdline += " "
solc_cmdline += " ".join(args.solc_remappings)
if path_type == "folder":
solidity_files = get_all_solidity_files(args.solidity_file_or_folder)
compilation_source_path = " ".join(solidity_files)
solc_cmdline += f" --allow-paths . --combined-json abi,ast,bin,bin-runtime,srcmap,srcmap-runtime,userdoc,devdoc,hashes {compilation_source_path}"
return solc_cmdline
# Path: eburger/utils/helpers.py
def get_filename_from_path(file_path: str) -> tuple:
if args.project_name:
filename = args.project_name
else:
filename_match = re.search(r"/([^/]+)\.sol$", file_path)
filename = filename_match.group(1) if filename_match else None
filename = f"{filename}_{datetime.now().strftime('%m%y')}"
output_filename = settings.outputs_dir / f"{filename}.json"
return filename, output_filename
# Path: eburger/utils/helpers.py
def is_valid_json(json_string):
if not json_string:
return False
try:
json.loads("".join(json_string))
return True
except ValueError:
return False
# Path: eburger/utils/helpers.py
def run_command(command, directory=None, shell=False, live_output=False):
log("info", f"{command}")
results = []
errors = []
process = subprocess.Popen(
command if shell else shlex.split(command),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
shell=shell,
cwd=directory,
)
while True:
output = process.stdout.readline()
error = process.stderr.readline()
if output == "" and process.poll() is not None:
break
if output:
output_stripped = output.strip()
if live_output:
log("info", output_stripped)
results.append(output_stripped)
if error:
error_stripped = error.strip()
if live_output:
log("error", error_stripped)
errors.append(error_stripped)
return results, errors
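# A minimal illustrative usage sketch (hypothetical `_demo_solc_version` helper, not
# part of the original module): run_command returns stdout lines as one list and
# stderr lines as another, so callers can branch on whether anything hit stderr;
# `log` refers to the logger helper shown further down in this listing.
def _demo_solc_version():
    results, errors = run_command("solc --version")
    if errors:
        log("warning", "solc wrote to stderr while printing its version.")
    return results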
# Path: eburger/utils/installers.py
def install_foundry_if_not_found():
forge_binary_found = False
try:
run_command("forge -V")
forge_binary_found = True
except FileNotFoundError:
pass
if not forge_binary_found:
log("info", "forge wasn't found on the system, trying to install.")
run_command(
"curl -L https://foundry.paradigm.xyz | bash",
shell=True,
)
run_command("foundryup")
try:
run_command("forge -V")
log("info", "Successfully installed forge.")
except:
log(
"error",
"Couldn't automatically install forge, please install manually and try again.",
)
# Path: eburger/utils/installers.py
def install_hardhat_if_not_found():
hardhat_found = False
npm_found = False
npx_found = False
try:
res, err = run_command("npx hardhat help", directory=settings.project_root)
if err:
raise FileNotFoundError
hardhat_found = True
except FileNotFoundError:
pass
if not hardhat_found:
log("info", "Local hardhat not found on project, installing.")
try:
run_command("npm -v")
npm_found = True
except FileNotFoundError:
log(
"error",
"Can't automatically install hardhat without npm being installed manually first, please install npm and run again.",
)
if npm_found:
try:
run_command("npx -v")
npx_found = True
except FileNotFoundError:
log(
"error",
"Couldn't automatically install hardhat, please install manually and try again.",
)
if not npx_found:
try:
run_command("npm install -g npx")
run_command("npx -v")
except FileNotFoundError:
log(
"error",
"Couldn't automatically install hardhat, please install manually and try again.",
)
try:
if os.path.isfile(os.path.join(settings.project_root, "yarn.lock")):
run_command(
"yarn add --dev hardhat",
directory=settings.project_root,
)
else:
run_command(
"npm install --save-dev hardhat",
directory=settings.project_root,
)
except:
log(
"error",
"Couldn't automatically install hardhat, please install manually and try again.",
)
# Path: eburger/utils/installers.py
def set_solc_compiler_version(solc_required_version):
log("info", f"Trying to set solc to version {solc_required_version}")
solc_use_res, errors = run_command(f"solc-select use {solc_required_version}")
if not solc_use_res or errors:
log("info", "Trying to install missing solc version")
solc_select_install_res, errors = run_command(
f"solc-select install {solc_required_version}"
)
print(solc_select_install_res, errors)
solc_select_use_res, errors = run_command(
f"solc-select use {solc_required_version}"
)
print(solc_select_use_res, errors)
if not solc_use_res or errors:
log("error", "Failed to install required solc version")
log(
"info",
"Successfully set solc version, trying to compile contract.",
)
# Path: eburger/utils/logger.py
def log(type: str, message: str):
match type:
case "success":
if "success" not in args.no:
print(f"[{color.Success} 🍔 Success {color.Default}] {message}")
case "error":
print(f"[{color.Error} Error {color.Default}] {message}")
sys.exit(0)
case "warning":
if "warning" not in args.no:
print(f"[{color.Warning} Warning {color.Default}] {message}")
case "info":
if "info" not in args.no:
print(f"[{color.Info} Info {color.Default}] {message}")
case "insights":
# json_printable = json.dumps(message, indent=4)
# print(json_printable)
if "insights" not in args.no:
for item in message:
name = item.get("name")
severity = item.get("severity")
results = item.get("results")
# Check a sample result to ensure correct structure
try:
results[0]["file"]
except Exception:
log("warning", f"Bad results for {item.get('name')}, skipping.")
continue
occurrences = construct_insight_occurrences(results)
match severity:
case "High":
severity = f"[{color.Error} ❗️High {color.Default}]"
case "Medium":
severity = f"[{color.Warning} ❗️Medium {color.Default}]"
case "Low":
severity = f"[{color.Info} ❗️Low {color.Default}]"
print(f"{severity} {name} at:")
for occurrence in occurrences:
print(f" {occurrence}")
# Path: eburger/serializer.py
def parse_solidity_ast(ast_json, G):
"""
Parses the entire Solidity AST from the JSON representation.
"""
root_nodes = []
for key, node in ast_json.get("sources", {}).items():
ast_node = node.get("AST", node.get("ast", {}))
root_node, G = parse_ast_node(ast_node, G)
if root_node:
root_nodes.append(root_node)
return root_nodes, G
# Path: eburger/serializer.py
def reduce_json(ast_json):
# Maintain original file list array
def extract_file_list_from_ast(ast_data):
if "sources" in ast_data:
return list(ast_data["sources"].keys())
return []
original_file_list = extract_file_list_from_ast(ast_json)
# Function to remove keys in-place from a dictionary
def remove_keys_in_place(dictionary):
removal_list = [
key
for key in dictionary
if any(substring in key for substring in settings.excluded_contracts)
]
for key in removal_list:
log("info", f"Excluding {key}")
del dictionary[key]
for section in ["sources", "contracts"]:
if section in ast_json:
remove_keys_in_place(ast_json[section])
return ast_json, original_file_list
# Path: eburger/utils/outputs.py
def draw_graph(file_name, G):
nt = Network("800px", "1800px", select_menu=True, directed=True)
nt.from_nx(G)
nt.show_buttons(filter_=[])
original_stdout = sys.stdout
sys.stdout = Silent()
file_path = settings.outputs_dir / f"{file_name}.html"
nt.show(str(file_path), notebook=False)
sys.stdout = original_stdout
graph_vis_lib_path = settings.outputs_dir / "lib"
if os.path.exists(graph_vis_lib_path):
shutil.rmtree(graph_vis_lib_path)
graph_html_folders = ["bindings", "tom-select", "vis-*"]
move_multiple_dirs("lib", graph_html_folders, graph_vis_lib_path)
# Path: eburger/utils/outputs.py
def save_as_json(file_path, json_data):
with open(file_path, "w") as outfile:
json.dump(json_data, outfile, indent=4)
# Path: eburger/utils/outputs.py
def save_python_ast(file_name, data):
orig_name = file_name
file_path = settings.outputs_dir / f"{file_name}.py"
with open(file_path, "w") as file:
data_str = repr(data)
file.write(f"from eburger.models import *\n\n{orig_name} = {data_str}\n")
# Path: eburger/yaml_parser.py
def process_files_concurrently(ast_data: dict, src_file_list: list):
yaml_files = list(settings.templates_directory.glob("*.yaml"))
log(
"info",
f"Loaded {color.Success}{len(yaml_files)}{color.Default} templates for execution.",
)
insights = []
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [
executor.submit(process_yaml, str(file_path), ast_data, src_file_list)
for file_path in yaml_files
]
for future in concurrent.futures.as_completed(futures):
try:
results = future.result(timeout=30) # 30 seconds timeout
if results.get("results"):
insights.append(results)
except concurrent.futures.TimeoutError:
log("error", "A task has timed out.")
except Exception as e:
log("error", f"Unhandled error: {e}")
log(
"info",
f"{color.Error}{len(insights)}{color.Default} insight{'s were' if (len(insights) > 1 or len(insights) == 0) else ' was'} found by eBurger.",
)
if insights:
log("insights", insights)
return insights
# Path: eburger/main.py
from datetime import datetime
from pathlib import Path
from eburger.utils.cli_args import args
from eburger.utils.filesystem import (
create_directory_if_not_exists,
create_or_empty_directory,
find_and_read_sol_file,
get_foundry_ast_json,
get_hardhat_ast_json,
get_solidity_version_from_file,
)
from eburger.utils.helpers import (
construct_solc_cmdline,
get_filename_from_path,
is_valid_json,
run_command,
)
from eburger.utils.installers import (
install_foundry_if_not_found,
install_hardhat_if_not_found,
set_solc_compiler_version,
)
from eburger.utils.logger import log
from eburger.serializer import parse_solidity_ast, reduce_json
from eburger.utils.outputs import draw_graph, save_as_json, save_python_ast
from eburger.yaml_parser import process_files_concurrently
import json
import os
import re
import shutil
import sys
import networkx as nx
import eburger.settings as settings
if path_type is not None:
# Foundry compilation flow
if path_type == "foundry":
log("info", "Foundry project detected, compiling using forge.")
install_foundry_if_not_found()
if args.solc_remappings:
log("warning", "Ignoring the -r option in foundry based projects.")
run_command(f"forge clean", args.solidity_file_or_folder)
forge_out_dir = settings.outputs_dir / "forge-output"
create_or_empty_directory(forge_out_dir)
build_output_lines, _ = run_command(
f"forge build --force --skip {' '.join(settings.excluded_dirs)} --build-info --build-info-path {forge_out_dir}",
args.solidity_file_or_folder,
)
for line in build_output_lines:
log("info", line)
sample_file_path = find_and_read_sol_file(args.solidity_file_or_folder)
filename, output_filename = get_filename_from_path(sample_file_path)
ast_json = get_foundry_ast_json(forge_out_dir)
ast_json, src_file_list = reduce_json(ast_json)
save_as_json(output_filename, ast_json)
# Hardhat compilation flow
if path_type == "hardhat":
log("info", "Hardhat project detected, compiling using hardhat.")
install_hardhat_if_not_found()
if args.solc_remappings:
log("warning", "Ignoring the -r option in hardhat based projects.")
run_command(f"npx hardhat clean", directory=settings.project_root)
build_output_lines, _ = run_command(
f"npx hardhat compile --force",
directory=settings.project_root,
)
for line in build_output_lines:
log("info", line)
# Copy compilation results to .eburger
expected_hardhat_outfiles = os.path.join(
args.solidity_file_or_folder, "artifacts", "build-info"
)
if not os.path.isdir(expected_hardhat_outfiles):
log(
"error",
f"Hardhat's compilation files were not found in expected location {expected_hardhat_outfiles}",
)
hardhat_out_dir = settings.outputs_dir / "hardhat-output"
if os.path.exists(hardhat_out_dir):
shutil.rmtree(hardhat_out_dir)
shutil.copytree(expected_hardhat_outfiles, hardhat_out_dir)
sample_file_path = find_and_read_sol_file(args.solidity_file_or_folder)
filename, output_filename = get_filename_from_path(sample_file_path)
ast_json = get_hardhat_ast_json(hardhat_out_dir)
ast_json, src_file_list = reduce_json(ast_json)
save_as_json(output_filename, ast_json)
# solc compilation flow
elif path_type in ["file", "folder"]:
sample_file_path = args.solidity_file_or_folder
compilation_source_path = args.solidity_file_or_folder
if path_type == "folder":
sample_file_path = find_and_read_sol_file(args.solidity_file_or_folder)
filename, output_filename = get_filename_from_path(sample_file_path)
if args.solc_compiler_version:
solc_required_version = args.solc_compiler_version
else:
solc_required_version = get_solidity_version_from_file(sample_file_path)
set_solc_compiler_version(solc_required_version)
solc_cmdline = construct_solc_cmdline(path_type, compilation_source_path)
if solc_cmdline is None:
log("error", "Error constructing solc command line")
solc_compile_res, _ = run_command(solc_cmdline)
if not is_valid_json(solc_compile_res):
            error_string = "Locally installed solc errored out trying to compile the contract. Please review compiler warnings above"
            if not args.solc_remappings:
                error_string += (
                    " or see if library remappings (using the -r option) are needed"
                )
            if not args.solc_compiler_version:
                error_string += ", or try specifying the solidity compiler version (using the -s option)"
error_string += "."
log(
"error",
error_string,
)
solc_compile_res_parsed = json.loads("".join(solc_compile_res))
ast_json, src_file_list = reduce_json(solc_compile_res_parsed)
save_as_json(output_filename, solc_compile_res_parsed)
# Parse AST
G = nx.MultiDiGraph()
ast_roots, G = parse_solidity_ast(ast_json, G)
# Draw graph
settings.outputs_dir / filename
draw_graph(settings.outputs_dir / filename, G)
save_python_ast(filename, ast_roots)
# Parse YAML templates
if args.templates_path:
settings.templates_directory = Path(args.templates_path)
log("info", f"Templates path: {settings.templates_directory}")
insights = process_files_concurrently(ast_json, src_file_list)
if insights:
# Same data saved twice - once for historic reference and one for clarity
| insights_json_path = settings.outputs_dir / f"eburger_output_{filename}.json" |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: monofy-org/monofy-ai
# Path: utils/startup_args.py
class DefaultArgs:
def print_help():
# Path: settings.py
HOST = "127.0.0.1"
# Path: settings.py
IDLE_OFFLOAD_TIME = 60
# Path: settings.py
MEDIA_CACHE_DIR = ".cache"
# Path: settings.py
PORT = 5000
# Path: settings.py
SD_USE_SDXL = True # Set to True for SDXL/turbo models
# Path: utils/console_logging.py
def init_logging():
import logging
import sys
# Create a custom formatter with color
class ColoredFormatter(logging.Formatter):
COLORS = {
"ERROR": ANSI_COLORS["red"],
"WARNING": ANSI_COLORS["bold"] + ANSI_COLORS["cyan"],
"INFO": ANSI_COLORS["cyan"],
"DEBUG": ANSI_COLORS["gray"],
"RESET": ANSI_COLORS["reset"],
}
def format(self, record):
log_message = super(ColoredFormatter, self).format(record)
log_level = record.levelname
# Add color to log messages based on log level
return (
f"{self.COLORS.get(log_level, '')}{log_message}{self.COLORS['RESET']}"
)
# Create a console handler and set the formatter
ensure_folder_exists("logs")
#logging.basicConfig(filename=os.path.join("logs", "console.log"), level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logging.root.handlers.clear()
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(ColoredFormatter())
logging.root.addHandler(console_handler)
# Path: utils/file_utils.py
def ensure_folder_exists(path: str):
if not os.path.exists(path):
os.makedirs(path)
logging.info(f"Created folder {path}")
# Path: utils/gpu_utils.py
def load_gpu_task(task_name: str, client, free_vram=True):
if not torch.cuda.is_available():
logging.info("CUDA not available. Skipping offloads.")
return
global current_tasks
global last_task
last_used[task_name] = time.time()
if task_name == last_task or not free_vram:
return
last_task = task_name
logging.info(f"Freeing VRAM for task {task_name}...")
before = torch.cuda.memory_reserved()
if free_vram:
free_idle_vram(task_name)
small_tasks_only = last_task is not None and last_task in small_tasks
empty_cache = task_name != last_task
if small_tasks_only:
if task_name in large_tasks:
empty_cache = True
for _, client in current_tasks.items():
client.offload(task_name)
current_tasks.clear()
else:
if current_tasks:
empty_cache = True
for _, client in current_tasks.items():
client.offload(task_name)
current_tasks.clear()
current_tasks[task_name] = client
if empty_cache:
torch.cuda.empty_cache()
gc.collect()
after = torch.cuda.memory_reserved()
gib = bytes_to_gib(before - after)
if gib > 0:
logging.info(f"Freed {gib:.2f} GiB from VRAM cache")
logging.warn(f"Loading {task_name}...")
last_used[task_name] = time.time()
# Path: utils/gpu_utils.py
def set_idle_offload_time(timeout_seconds: float):
global idle_offload_time
idle_offload_time = timeout_seconds
# Path: utils/misc_utils.py
def print_completion_time(since, task_name=None):
t = time.time() - since
logging.info(f"{task_name or 'Task'} completed in {round(t,2)} seconds.")
return t
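# A minimal illustrative sketch (hypothetical `_demo_timed_task` helper, not part of
# the original module): the usual timing pattern around a unit of work, using the
# helper above; `time` is already imported here since print_completion_time itself
# calls time.time().
def _demo_timed_task(task):
    since = time.time()
    task()  # run the work being measured
    return print_completion_time(since, "Demo task")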
# Path: utils/misc_utils.py
def sys_info():
python_info = sys.version
optimizations = f"bf16={is_bf16_available}, fp16={use_fp16}, cudnn={torch.backends.cudnn.is_available()}, xformers={USE_XFORMERS}, deepspeed={USE_DEEPSPEED}"
logging.info(f"Python version: {python_info}")
logging.info(f"Using device: {autodetect_device()} ({optimizations})")
# Path: webui.py
def launch_webui(args, prevent_thread_lock=False):
if args is None or args.tts:
tts = True
else:
tts = False
async def chat(
text: str, history: list[list], speak_results: bool, chunk_sentences
):
from clients import TTSClient, Exllama2Client
from utils.chat_utils import convert_gr_to_openai
print(f"text={text}")
print(f"chunk_sentences={chunk_sentences}")
response = Exllama2Client.chat(
text=text,
messages=convert_gr_to_openai(history),
)
message = ""
for chunk in response:
message += chunk
if tts and speak_results:
logging.info("\nGenerating speech...")
async with gpu_thread_lock:
load_gpu_task("tts", TTSClient)
audio = TTSClient.generate_speech(
message,
speed=settings["speed"],
temperature=settings["temperature"],
speaker_wav=settings["voice"],
language=settings["language"],
)
yield message
play_wav_from_bytes(audio)
with gr.Blocks(title="monofy-ai", analytics_enabled=False).queue() as web_ui:
if not args or args.llm:
with gr.Tab("Chat/TTS"):
speech_checkbox = None
with gr.Row():
with gr.Column():
with gr.Row():
speech_checkbox = gr.Checkbox(
value=tts is not None,
interactive=tts is not None,
label="Speak results",
)
check_sentences_checkbox = gr.Checkbox(
value=True,
label="Chunk sentences",
visible=False, # TODO
)
grChat = gr.ChatInterface(
fn=chat,
additional_inputs=[
speech_checkbox,
check_sentences_checkbox,
],
)
grChat.queue()
if tts:
with gr.Column():
def set_language(value):
settings["language"] = value
def set_speed(value):
settings["speed"] = value
def set_temperature(value):
settings["temperature"] = value
def set_voice(value):
settings["voice"] = value
with gr.Column():
grText = gr.Textbox(
"This is a test of natural speech.", label="Text"
)
tts_voice = gr.Textbox(
os.path.join(TTS_VOICES_PATH, "female1.wav"),
label="Voice",
)
with gr.Row():
tts_speed = gr.Number("1", label="Speed")
tts_temperature = gr.Number(
"0.75", label="Temperature"
)
tts_language = gr.Dropdown(
[
"en",
"es",
"fr",
"de",
"it",
"pt",
"pl",
"tr",
"ru",
"nl",
"cs",
"ar",
"zh-cn",
"ja",
"hu",
"ko",
],
label="Language",
value=settings["language"],
)
tts_language.change(set_language, inputs=[tts_language])
tts_speed.change(set_speed, inputs=[tts_speed])
tts_temperature.change(
set_temperature, inputs=[tts_temperature]
)
tts_voice.change(set_voice, inputs=[tts_voice])
tts_button = gr.Button("Generate")
tts_output = gr.Audio(
label="Audio Output",
type="numpy",
autoplay=True,
format="wav",
interactive=False,
streaming=False, # TODO
)
import pygame
def play_wav_from_bytes(wav_bytes):
tts_output.update(wav_bytes)
return
pygame.mixer.init()
sound = pygame.mixer.Sound(io.BytesIO(wav_bytes))
sound.play()
# Wait for the sound to finish playing
pygame.time.wait(int(sound.get_length() * 1000))
async def preview_speech(
text: str,
speed: int,
temperature: float,
voice: str,
language: str,
):
from clients import TTSClient
# TODO stream to grAudio using generate_text_streaming
async with gpu_thread_lock:
load_gpu_task("tts", TTSClient)
yield TTSClient.generate_speech(
text,
speed,
temperature,
voice,
language,
)
tts_button.click(
preview_speech,
inputs=[
grText,
tts_speed,
tts_temperature,
tts_voice,
tts_language,
],
outputs=[tts_output],
)
# Right half of the screen (Chat UI) - Only if args.llm is True
if not args or args.sd:
from hyper_tile import split_attention
t2i_vid_button: gr.Button = None
async def generate_video(
image_input,
width: int,
height: int,
steps: int,
fps: int,
motion_bucket_id: int,
noise: float,
interpolate: int,
):
from clients import SDClient
# Convert numpy array to PIL Image
async with gpu_thread_lock:
load_gpu_task("img2vid", SDClient) # TODO VideoClient
image = Image.fromarray(image_input).convert("RGB")
filename_noext = random_filename(None, True)
num_frames = 50
decode_chunk_size = 25
def do_gen():
video_frames = SDClient.pipelines["img2vid"](
image,
num_inference_steps=steps,
num_frames=num_frames,
motion_bucket_id=motion_bucket_id,
decode_chunk_size=decode_chunk_size,
width=width,
height=height,
noise_aug_strength=noise,
).frames[0]
if interpolate > 1:
video_frames = modules.rife.interpolate(
video_frames,
count=interpolate,
scale=1,
pad=1,
change=0,
)
export_to_video(
video_frames,
f"{filename_noext}.mp4",
fps=fps * interpolate,
)
else:
export_to_video(
video_frames, f"{filename_noext}.mp4", fps=fps
)
return f"{filename_noext}.mp4"
if SD_USE_HYPERTILE_VIDEO:
aspect_ratio = 1 if width == height else width / height
split_vae = split_attention(
SDClient.pipelines["img2vid"].vae,
tile_size=256,
aspect_ratio=aspect_ratio,
)
split_unet = split_attention(
SDClient.pipelines["img2vid"].unet,
tile_size=256,
aspect_ratio=aspect_ratio,
)
with split_vae:
with split_unet:
yield do_gen()
else:
yield do_gen()
async def txt2img(
prompt: str,
negative_prompt: str,
width: int,
height: int,
num_inference_steps: int,
guidance_scale: float,
):
from clients import SDClient
async with gpu_thread_lock:
load_gpu_task(
"sdxl" if SD_USE_SDXL else "stable diffusion", SDClient
)
result = SDClient.pipelines["txt2img"](
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale,
width=width,
height=height,
)
yield result.images[0], gr.Button(
label="Generate Video", interactive=True
)
async def audiogen(prompt: str, duration: float, temperature: float):
from clients import AudioGenClient
filename_noext = random_filename(None, True)
return AudioGenClient.generate(
prompt,
file_path=filename_noext,
duration=duration,
temperature=temperature,
)
async def musicgen(prompt: str, duration: float, temperature: float):
from clients import MusicGenClient
filename_noext = random_filename(None, True)
return MusicGenClient.generate(
prompt,
file_path=filename_noext,
duration=duration,
temperature=temperature,
)
def disable_send_button():
yield gr.Button(label="Generating...", interactive=False)
with gr.Tab("Image/Video"):
with gr.Row():
with gr.Column():
t2i_prompt = gr.TextArea(
"an advanced humanoid robot with human expression in a futuristic laboratory",
lines=4,
label="Prompt",
)
t2i_negative_prompt = gr.TextArea(
"", lines=4, label="Negative Prompt"
)
with gr.Row():
t2i_width = gr.Slider(
minimum=256,
maximum=2048,
value=512,
step=8,
interactive=True,
label="Width",
)
t2i_height = gr.Slider(
minimum=256,
maximum=2048,
value=512,
step=8,
interactive=True,
label="Height",
)
t2i_steps = gr.Slider(
minimum=1,
maximum=100,
value=20,
step=1,
interactive=True,
label="Steps",
)
t2i_guidance_scale = gr.Slider(
minimum=0,
maximum=50,
value=3,
step=0.1,
interactive=True,
label="Guidance",
)
t2i_button = gr.Button("Generate")
with gr.Column():
t2i_output = gr.Image(
None,
width=512,
height=512,
interactive=False,
label="Output",
)
with gr.Row():
i2v_width = gr.Number(
512, label="Width", precision=0, step=8
)
i2v_height = gr.Number(
512, label="Height", precision=0, step=8
)
i2v_fps = gr.Number(6, label="FPS", precision=0, minimum=1)
i2v_steps = gr.Number(10, label="Steps", precision=0)
i2v_motion = gr.Number(
15, label="Motion Bucket ID", precision=0
)
i2v_noise = gr.Number(
0.0,
label="Noise (also increases motion)",
precision=0,
step=0.01,
)
i2v_interpolation = gr.Number(
3, label="Frame Interpolation", precision=0, minimum=1
)
t2i_vid_button = gr.Button("Generate Video", interactive=False)
i2v_output = gr.Video(
None,
width=320,
height=320,
interactive=False,
label="Video",
format="mp4",
autoplay=True,
)
t2i_vid_button.click(
generate_video,
inputs=[
t2i_output,
i2v_width,
i2v_height,
i2v_steps,
i2v_fps,
i2v_motion,
i2v_noise,
i2v_interpolation,
],
outputs=[i2v_output],
)
t2i_button.click(disable_send_button, outputs=[t2i_vid_button])
t2i_button.click(
txt2img,
inputs=[
t2i_prompt,
t2i_negative_prompt,
t2i_width,
t2i_height,
t2i_steps,
t2i_guidance_scale,
],
outputs=[t2i_output, t2i_vid_button],
)
with gr.Tab("Audio/Music"):
with gr.Row():
with gr.Column():
audiogen_prompt = gr.TextArea(
"robot assembly line", label="Audio description", lines=3
)
with gr.Row():
audiogen_duration = gr.Slider(
minimum=1,
maximum=30,
value=3,
step=1,
interactive=True,
label="Duration (seconds)",
)
audiogen_temperature = gr.Slider(
minimum=0.1,
maximum=1.9,
value=1,
step=0.05,
interactive=True,
label="Temperature",
)
audiogen_button = gr.Button("Generate Audio")
audiogen_output = gr.Audio(interactive=False)
audiogen_button.click(
audiogen,
inputs=[
audiogen_prompt,
audiogen_duration,
audiogen_temperature,
],
outputs=[audiogen_output],
)
with gr.Column():
musicgen_prompt = gr.TextArea(
"techno beat with a cool bassline",
label="Music description",
lines=3,
)
with gr.Row():
musicgen_duration = gr.Slider(
minimum=1,
maximum=30,
value=15,
step=1,
interactive=True,
label="Duration (seconds)",
)
musicgen_temperature = gr.Slider(
minimum=0.1,
maximum=1.9,
value=1,
step=0.05,
interactive=True,
label="Temperature",
)
musicgen_button = gr.Button("Generate Music")
musicgen_output = gr.Audio(interactive=False)
musicgen_button.click(
musicgen,
inputs=[
musicgen_prompt,
musicgen_duration,
musicgen_temperature,
],
outputs=[musicgen_output],
)
with gr.Tab("Shap-e"):
async def shape_generate(prompt: str, steps: int, guidance: float):
from clients import ShapeClient
async with gpu_thread_lock:
load_gpu_task("shap-e", ShapeClient)
filename_noext = random_filename(None, True)
file_path = ShapeClient.generate(
prompt,
steps=steps,
guidance_scale=guidance,
file_path=filename_noext,
format="glb",
)
print(file_path)
yield file_path
with gr.Row():
with gr.Column():
shap_e_prompt = gr.TextArea("a humanoid robot", label="Prompt")
shap_e_guidance = gr.Slider(
minimum=0,
maximum=50,
value=15,
step=0.1,
interactive=True,
label="Guidance",
)
shap_e_steps = gr.Slider(
minimum=1,
maximum=100,
value=20,
step=1,
interactive=True,
label="Steps",
)
shap_e_button = gr.Button("Generate")
with gr.Column():
shap_e_output = gr.Model3D(
None,
interactive=False,
label="Output",
)
shap_e_button.click(
shape_generate,
inputs=[
shap_e_prompt,
shap_e_steps,
shap_e_guidance,
],
outputs=[shap_e_output],
)
web_ui.launch(
prevent_thread_lock=prevent_thread_lock, inbrowser=args and not args.all
)
return web_ui
# Path: run.py
import time
import torch
import logging
import uvicorn
from utils.startup_args import print_help, startup_args as args
from settings import (
HOST,
IDLE_OFFLOAD_TIME,
MEDIA_CACHE_DIR,
PORT,
SD_USE_SDXL
)
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from utils.console_logging import init_logging
from utils.file_utils import ensure_folder_exists
from utils.gpu_utils import load_gpu_task, set_idle_offload_time
from utils.misc_utils import print_completion_time, sys_info
from webui import launch_webui
from apis import txt2img, img2img, depth, txt2vid, img2vid, shape, audiogen, musicgen
from apis.llm import llm_api
from apis import whisper
from apis.tts import tts_api
from clients import SDClient
from clients import TTSClient
from clients import Exllama2Client
API_PREFIX = "/api"
init_logging()
start_time = None
end_time = None
sys_info()
ensure_folder_exists(MEDIA_CACHE_DIR)
def start_fastapi(args=None):
global start_time
start_time = time.time()
app = FastAPI(
title="monofy-ai",
description="Simple and multifaceted API for AI",
version="0.0.1",
redoc_url="/api/docs",
docs_url="/api/docs/swagger",
)
set_idle_offload_time(IDLE_OFFLOAD_TIME)
if args is None or args.all or args.sd:
app.include_router(txt2img.router, prefix=API_PREFIX)
app.include_router(img2img.router, prefix=API_PREFIX)
app.include_router(depth.router, prefix=API_PREFIX)
app.include_router(txt2vid.router, prefix=API_PREFIX)
app.include_router(img2vid.router, prefix=API_PREFIX)
app.include_router(shape.router, prefix=API_PREFIX)
app.include_router(audiogen.router, prefix=API_PREFIX)
app.include_router(musicgen.router, prefix=API_PREFIX)
if args is None or args.all or args.llm:
app.include_router(whisper.router, prefix=API_PREFIX)
# TODO use router
llm_api(app)
if args is None or args.all or args.tts:
tts_api(app)
app.mount("/", StaticFiles(directory="public_html", html=True), name="static")
return app
def print_startup_time():
global start_time
global end_time
if end_time is None:
end_time = print_completion_time(start_time, "Startup")
def warmup(args):
logging.info("Warming up...")
if args is None or args.sd:
load_gpu_task("sdxl" if SD_USE_SDXL else "stable diffusion", SDClient, False)
SDClient.pipelines["txt2img"] # just reference something so the module loads
logging.info(f"[--warmup] {SDClient.friendly_name} ready.")
if args is None or args.tts:
load_gpu_task("tts", TTSClient, False)
| TTSClient.generate_speech("Initializing speech.") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: wadiuvatzy/SAM-G
# Path: logger.py
class Logger(object):
def __init__(self, log_dir, use_tb=False, use_wandb=False):
self._log_dir = log_dir
self._train_mg = MetersGroup(log_dir / 'train.csv',
formating=COMMON_TRAIN_FORMAT,
use_wandb=use_wandb)
self._eval_mg = MetersGroup(log_dir / 'eval.csv',
formating=COMMON_EVAL_FORMAT,
use_wandb=use_wandb)
if use_tb:
self._sw = SummaryWriter(str(log_dir / 'tb'))
else:
self._sw = None
self.use_wandb = use_wandb
def _try_sw_log(self, key, value, step):
if self._sw is not None:
self._sw.add_scalar(key, value, step)
def log(self, key, value, step):
assert key.startswith('train') or key.startswith('eval')
if type(value) == torch.Tensor:
value = value.item()
self._try_sw_log(key, value, step)
mg = self._train_mg if key.startswith('train') else self._eval_mg
mg.log(key, value)
def log_metrics(self, metrics, step, ty):
for key, value in metrics.items():
self.log(f'{ty}/{key}', value, step)
def dump(self, step, ty=None):
if ty is None or ty == 'eval':
self._eval_mg.dump(step, 'eval')
if ty is None or ty == 'train':
self._train_mg.dump(step, 'train')
def log_and_dump_ctx(self, step, ty):
return LogAndDumpCtx(self, step, ty)
# Path: replay_buffer.py
class ReplayBufferStorage:
def __init__(self, data_specs, replay_dir):
self._data_specs = data_specs
self._replay_dir = replay_dir
replay_dir.mkdir(exist_ok=True)
self._current_episode = defaultdict(list)
self._preload()
def __len__(self):
return self._num_transitions
def add(self, time_step):
for spec in self._data_specs:
value = time_step[spec.name]
if np.isscalar(value):
value = np.full(spec.shape, value, spec.dtype)
assert spec.shape == value.shape and spec.dtype == value.dtype
self._current_episode[spec.name].append(value)
if time_step.last():
episode = dict()
for spec in self._data_specs:
value = self._current_episode[spec.name]
episode[spec.name] = np.array(value, spec.dtype)
self._current_episode = defaultdict(list)
self._store_episode(episode)
def _preload(self):
self._num_episodes = 0
self._num_transitions = 0
for fn in self._replay_dir.glob('*.npz'):
_, _, eps_len = fn.stem.split('_')
self._num_episodes += 1
self._num_transitions += int(eps_len)
def _store_episode(self, episode):
eps_idx = self._num_episodes
eps_len = episode_len(episode)
self._num_episodes += 1
self._num_transitions += eps_len
ts = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
eps_fn = f'{ts}_{eps_idx}_{eps_len}.npz'
save_episode(episode, self._replay_dir / eps_fn)
# Path: replay_buffer.py
def make_replay_loader(replay_dir, max_size, batch_size, num_workers,
save_snapshot, nstep, discount):
max_size_per_worker = max_size // max(1, num_workers)
iterable = ReplayBuffer(replay_dir,
max_size_per_worker,
num_workers,
nstep,
discount,
fetch_every=1000,
save_snapshot=save_snapshot)
loader = torch.utils.data.DataLoader(iterable,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
worker_init_fn=_worker_init_fn)
return loader
# Path: video.py
class TrainVideoRecorder:
def __init__(self, root_dir, render_size=256, fps=20):
if root_dir is not None:
self.save_dir = root_dir / 'train_video'
self.save_dir.mkdir(exist_ok=True)
else:
self.save_dir = None
self.render_size = render_size
self.fps = fps
self.frames = []
def init(self, obs, enabled=True):
self.frames = []
self.enabled = self.save_dir is not None and enabled
self.record(obs)
def record(self, obs):
if self.enabled:
frame = cv2.resize(obs[-3:].transpose(1, 2, 0),
dsize=(self.render_size, self.render_size),
interpolation=cv2.INTER_CUBIC)
self.frames.append(frame)
def save(self, file_name):
if self.enabled:
path = self.save_dir / file_name
imageio.mimsave(str(path), self.frames, fps=self.fps)
# Path: video.py
class VideoRecorder:
def __init__(self, root_dir, render_size=256, fps=20):
if root_dir is not None:
self.save_dir = root_dir / 'eval_video'
self.save_dir.mkdir(exist_ok=True)
else:
self.save_dir = None
self.render_size = render_size
self.fps = fps
self.frames = []
def init(self, env, enabled=True):
self.frames = []
self.enabled = self.save_dir is not None and enabled
self.record(env)
def record(self, env):
if self.enabled:
if hasattr(env, 'physics'):
frame = env.physics.render(height=self.render_size,
width=self.render_size,
camera_id=0)
else:
frame = env.render()
self.frames.append(frame)
def save(self, file_name):
if self.enabled:
path = self.save_dir / file_name
imageio.mimsave(str(path), self.frames, fps=self.fps)
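# A minimal illustrative sketch (hypothetical `_demo_record_episode` helper, not part
# of the original module): the recorder is driven once per evaluation episode, with
# init() on reset, record() after every step, and save() when the episode ends;
# `policy` stands in for a callable mapping observations to actions (e.g. agent.act).
def _demo_record_episode(recorder, env, policy):
    time_step = env.reset()
    recorder.init(env, enabled=True)
    while not time_step.last():
        time_step = env.step(policy(time_step.observation))
        recorder.record(env)
    recorder.save("demo_episode.mp4")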
# Path: eval.py
import warnings
import os
import hydra
import numpy as np
import torch
import wrappers.dmc as dmc
import utils
import wandb
import imageio
import cv2
import sys
import matplotlib.pyplot as plt
from pathlib import Path
from dm_env import specs
from logger import Logger
from replay_buffer import ReplayBufferStorage, make_replay_loader
from video import TrainVideoRecorder, VideoRecorder
from tqdm import tqdm
from wrappers.robo_wrapper import robo_make
from wrappers.habi_wrapper import make_habitat_env
from wrappers.carla_wrapper import carla_make_eval
from wrappers.robo_wrapper import robo_make
from eval import Workspace as W
step, episode, total_reward = 0, 0, 0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
count = 0
success_rate = 0
for i in tqdm(range(1, 11)):
episode_reward = 0
time_step = self.eval_env.reset()
self.video_recorder.init(self.eval_env, enabled=(episode == 0))
while not time_step.last():
if self.agent_name == 'pieg':
with torch.no_grad():
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=True)
else:
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=True)
time_step = self.eval_env.step(action)
self.video_recorder.record(self.eval_env)
total_reward += time_step.reward
episode_reward += time_step.reward
step += 1
success_rate += time_step.info['success']
f = open("{}/file_{}.txt".format(self.model_work_dir, self.cfg.seed), 'a')
f.write("episode_reward: %f \n" % (float(episode_reward)))
f.write("success_rate: %f \n" % (float(time_step.info['success'])))
f.close()
episode += 1
self.video_recorder.save(f'{self.global_frame}.mp4')
print(f'Seed {self.cfg.seed} Mean_reward: ', total_reward / episode)
print(f'Seed {self.cfg.seed} Success_rate: ', success_rate / episode)
with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log:
log('episode_reward', total_reward / episode)
log('episode_length', step * self.cfg.action_repeat / episode)
log('episode', self.global_episode)
log('step', self.global_step)
log('success_rate', success_rate / episode)
def carla_eval(self):
step, episode, total_reward = 0, 0, 0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
# carla metrics:
reason_each_episode_ended = []
distance_driven_each_episode = []
crash_intensity = 0.
steer = 0.
brake = 0.
count = 0
success_num = 0
for i in range(50):
time_step = self.eval_env.reset()
            # To check whether the weather is successfully changed
if i == 0:
plt.imshow(time_step.observation[6:9].transpose(1, 2, 0) / 255.)
plt.savefig(f'{self.work_dir}/test.png')
# self.video_recorder.init(enabled=True)
dist_driven_this_episode = 0.
while not time_step.last():
if self.agent_name == 'pieg':
with torch.no_grad():
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=True)
else:
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=True)
time_step, info = self.eval_env.step(action)
# self.video_recorder.record(self.eval_env)
total_reward += time_step.reward
step += 1
dist_driven_this_episode += info['distance']
crash_intensity += info['crash_intensity']
steer += abs(info['steer'])
brake += info['brake']
count += 1
episode += 1
print('total_reward per episode:', total_reward / episode)
# self.video_recorder.save(f'{episode}.mp4')
reason_each_episode_ended.append(info['reason_episode_ended'])
distance_driven_each_episode.append(dist_driven_this_episode)
if info['reason_episode_ended'] == 'success':
success_num += 1
with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log:
log('episode_reward', total_reward / episode)
log('episode_length', step * self.cfg.action_repeat / episode)
log('episode', self.global_episode)
log('step', self.global_step)
log('success_rate', success_num / episode)
print('METRICS--------------------------')
print("reason_each_episode_ended: {}".format(reason_each_episode_ended))
print("distance_driven_each_episode: {}".format(distance_driven_each_episode))
print('crash_intensity: {}'.format(crash_intensity / self.cfg.num_eval_episodes))
print('steer: {}'.format(steer / count))
print('brake: {}'.format(brake / count))
print('---------------------------------')
f = open("{}/file_{}.txt".format(self.work_dir, self.seed), 'a')
f.write("seed: %f \n" % (self.seed))
f.write("weather_name: %s \n" % (self.env_weather_name))
f.write("reward: %f \n" % (float(total_reward / episode)))
f.write("distance: %f \n" % (float(np.mean(distance_driven_each_episode))))
f.write("steer: %f \n" % (steer / count))
f.write("brake: %f \n" % (brake / count))
f.write("reason_episode_ended: {} \n".format(reason_each_episode_ended))
| f.write("distance all: {} \n".format(distance_driven_each_episode)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: USB-Data-Logger/PythonBasedPCapp
# Path: libs/utils.py
def get_icon(icon_path):
return ImageTk.PhotoImage(Image.open(resource_path(icon_path)))
# Path: libs/utils.py
def get_formatted_date(format_str, suffix=""):
format_str = format_str.replace("%o", suffix)
return datetime.now().strftime(format_str)
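# A minimal illustrative sketch (hypothetical `_demo_formatted_name` helper, not part
# of the original module): the "%o" placeholder is swapped for the optional suffix
# before strftime is applied.
def _demo_formatted_name():
    # e.g. "2024-01-31 12.00.00 run1" for the default template and suffix "run1"
    return get_formatted_date("%Y-%m-%d %H.%M.%S %o", suffix="run1")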
# Path: libs/utils.py
def resource_path(relative_path):
"""Get absolute path to resource, works for dev and for PyInstaller"""
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
# Path: libs/utils.py
def get_com_port(settings):
com_ports = [f"COM{i}" for i in range(10)]
if not settings["com_port"] in com_ports:
com_ports.append(settings["com_port"])
return com_ports
# Path: libs/constants.py
FILE_NAME_TEMPLATE = {
"Default (Date+time+Optional suffix)": "%Y-%m-%d %H.%M.%S %o",
"Date only (Date+Optional suffix)": "%Y-%m-%d %o",
"User Input": "%o",
}
SETTINGS_FILE = "settings.json"
ASSET_PATH = "asset"
MAIN_WINDOW_WIDTH = 450
MAIN_WINDO_HEIGHT = 300
MAIN_WINDOW_GEOMETRY = f"{MAIN_WINDOW_WIDTH}x{MAIN_WINDO_HEIGHT}"
# Path: libs/settings.py
def load_settings(settings_path):
def save_settings(settings_file, settings):
# Path: libs/serial_communicator.py
class SerialCommunicator:
def __init__(self):
self.serial_port = None
self.stop_thread = False
def open_serial_port(self, port, baudrate):
try:
self.serial_port = serial.Serial(port, baudrate, timeout=1)
except serial.SerialException as e:
raise RuntimeError(f"Error opening serial port: {e}")
def close_serial_port(self):
if self.serial_port:
self.serial_port.close()
def read_line(self):
if self.serial_port:
return self.serial_port.readline().decode("utf-8").strip()
return ""
# Path: dialogs/settings_window.py
class SettingsWindow:
def __init__(self, parent, settings, on_distroy=None):
self.parent = parent
self.place_holder = "Optional suffix"
settings_icon = get_icon(path.join(constants.ASSET_PATH, "smallicon_setting.ico"))
self.settings = settings
self.on_distroy = on_distroy
self.settings_window = ctk.CTkToplevel(parent)
self.settings_window.transient(self.parent)
self.settings_window.configure()
self.settings_window.title("Settings")
self.settings_window.geometry("460x200")
self.settings_window.resizable(False, False)
# Folder Selection
self.folder_label = ctk.CTkLabel(
self.settings_window, text="Select Folder Location:"
)
self.folder_label.place(x=10, y=7)
self.folder_var = ctk.StringVar()
self.folder_entry = ctk.CTkEntry(
self.settings_window,
width=210,
textvariable=self.folder_var,
)
self.folder_entry.place(x=150, y=7)
ToolTip(self.folder_entry, msg=self.folder_var.get)
self.browse_button = ctk.CTkButton(
self.settings_window,
text="Browse",
width=80,
fg_color="#0F0F0F",
hover_color="#474747",
command=self.browse_folder,
)
self.browse_button.place(x=373, y=7)
# File Name Template
self.template_label = ctk.CTkLabel(
self.settings_window, text="File Name Template:"
)
self.template_label.place(x=10, y=50)
self.template_var = ctk.StringVar()
self.combo_format = ctk.CTkComboBox(
self.settings_window,
values=[i for i in constants.FILE_NAME_TEMPLATE.keys()],
command=self.combo_format_selected,
width=300,
)
self.combo_format.place(x=150, y=50)
self.file_template_render = ctk.StringVar()
ctk.CTkLabel(
self.settings_window,
font=("impack", 15, "bold"),
width=150,
textvariable=self.file_template_render,
).place(x=150, y=80)
# Buffer Size
self.buffer_label = ctk.CTkLabel(self.settings_window, text="Buffer Size:")
self.buffer_label.place(x=20, y=110)
self.buffer_var = ctk.StringVar()
self.buffer_entry = ctk.CTkEntry(
self.settings_window, textvariable=self.buffer_var
)
self.buffer_entry.place(x=150, y=110)
# Save and Exit Button
self.save_and_exit_btn = ctk.CTkButton(
self.settings_window,
text="Save And Exit",
fg_color="#0F0F0F",
hover_color="#474747",
command=self.settings_ok,
)
self.save_and_exit_btn.place(x=150, y=160)
self.discard_button = ctk.CTkButton(
self.settings_window,
text="Discard Settings",
fg_color="#0F0F0F",
hover_color="#474747",
command=self.settings_window.destroy,
)
self.discard_button.place(x=310, y=160)
self.settings_window.wm_iconbitmap()
self.settings_window.after(
300, lambda: self.settings_window.iconphoto(False, settings_icon)
),
self.load_settings()
self.settings_window.focus()
self.settings_window.wait_visibility()
self.settings_window.grab_set()
def combo_format_selected(self, choice):
foramtted_date = get_formatted_date(
constants.FILE_NAME_TEMPLATE.get(choice, choice)
)
if choice != "User Input":
self.file_template_render.set(foramtted_date + "[Optional suffix].csv")
self.place_holder = "Optional suffix"
else:
self.file_template_render.set(
foramtted_date + "[User must input file name].csv"
)
self.place_holder = "Enter File Name"
self.template_var.set(choice)
self.settings["file_name_template"] = choice
def load_settings(self):
self.folder_var.set(self.settings["folder"])
self.template_var.set(self.settings["file_name_template"])
self.combo_format.set(self.settings["file_name_template"])
foramtted_date = get_formatted_date(
constants.FILE_NAME_TEMPLATE.get(
self.settings["file_name_template"],
self.settings["file_name_template"],
)
)
if self.settings["file_name_template"] != "User Input":
self.file_template_render.set(foramtted_date + "[Optional suffix].csv")
else:
self.file_template_render.set(foramtted_date + ".csv")
self.template_var.set(self.settings["file_name_template"])
self.buffer_var.set(self.settings["buffer_size"])
def browse_folder(self):
folder_selected = filedialog.askdirectory()
if not folder_selected:
folder_selected = self.settings["folder"]
if folder_selected:
self.folder_var.set(folder_selected)
def settings_ok(self):
self.settings["folder"] = self.folder_var.get()
self.settings["file_name_template"] = self.combo_format.get()
self.settings["buffer_size"] = self.buffer_var.get()
if self.on_distroy:
self.on_distroy()
self.settings_window.destroy()
# Path: dialogs/help_window.py
class HelpWindow:
def __init__(self,parent):
self.help_window = ctk.CTkToplevel(parent)
help_icon_path = get_icon(path.join(constants.ASSET_PATH, "Smallicon_help.ico"))
# Load the PNG file
image = Image.open(
resource_path(path.join(constants.ASSET_PATH, "HelpImage.png"))
)
# Convert the image to a format which Tkinter can use
photo = ctk.CTkImage(light_image=image, size=image.size)
        # Create a new window or use an existing widget to display the image
self.help_window.transient(parent)
self.help_window.title("Help Image")
self.help_window.wm_iconbitmap()
# Set the help window icon
# https://github.com/TomSchimansky/CustomTkinter/issues/2160
self.help_window.after(300, lambda: self.help_window.iconphoto(False, help_icon_path))
self.help_window.wait_visibility()
self.help_window.grab_set()
# Create a label in the new window to display the image
image_label = ctk.CTkLabel(self.help_window, image=photo, text="")
image_label.pack(fill=ctk.BOTH, expand=True)
# Path: Serial2CSVscript.py
import os
import threading
import customtkinter as ctk
from datetime import datetime
from os import path
from PIL import Image
from tktooltip import ToolTip
from libs.utils import (
get_icon,
get_formatted_date,
resource_path,
get_com_port,
)
from libs import constants
from libs import settings as st
from libs.serial_communicator import SerialCommunicator
from dialogs.settings_window import SettingsWindow
from dialogs.help_window import HelpWindow
command=self.com_port_clicked,
)
self.combo_com_port.place(x=10, y=25)
self.combo_com_port.set(self.settings["com_port"])
ToolTip(
self.combo_com_port,
            msg="you can type a custom COM port number\nor a path address in the window",
bg="grey",
fg="white",
)
self.combo_baud_rate = ctk.CTkComboBox(
self.root,
values=["9600", "19200", "38400", "57600", "115200"],
command=self.baud_rate_clicked,
)
self.combo_baud_rate.place(x=300, y=25)
self.combo_baud_rate.set(self.settings["baud_rate"])
ToolTip(
self.combo_baud_rate,
            msg="you can type a custom value\nin the window if needed",
bg="grey",
fg="white",
x_offset=-90,
)
ctk.CTkLabel(
self.root,
text="Output file name",
font=(
"impack",
15,
),
).place(x=170, y=65)
self.lbl_prefix = ctk.CTkLabel(self.root)
self.lbl_prefix.place(x=30, y=90)
self.file_suffix_entry = ctk.CTkEntry(
self.root,
# Here the placeholder text needs to be updated accordingly
placeholder_text=self.place_holder,
)
self.file_suffix_entry.place(x=155, y=90)
ctk.CTkLabel(self.root, text=".csv").place(x=300, y=90)
self.start_stop_button = ctk.CTkButton(
self.root,
text="Start Monitoring",
fg_color="#0F0F0F",
hover_color="#474747",
command=self.toggle_monitoring,
)
self.start_stop_button.place(x=155, y=130)
self.help_btn = ctk.CTkButton(
self.root,
width=80,
text="Help",
fg_color="#0F0F0F",
hover_color="#474747",
command=lambda :HelpWindow(self.root),
)
self.help_btn.place(x=360, y=90)
self.setting_btn = ctk.CTkButton(
self.root,
width=80,
text="Settings",
fg_color="#0F0F0F",
hover_color="#474747",
command=self.open_settings,
)
self.setting_btn.place(x=360, y=130)
self.status_label = ctk.CTkLabel(self.root, text="Monitoring Console")
self.status_label.place(x=10, y=138)
self.output_window = ctk.CTkTextbox(
self.root,
width=430,
height=120,
fg_color="#0F0F0F",
            wrap=ctk.WORD,  # wrap long lines at word boundaries
)
self.output_window.place(x=10, y=170)
# Set the state of the ScrolledText widget to DISABLED
self.output_window.configure(state=ctk.DISABLED)
ToolTip(self.output_window, msg="Message")
# Function to open Help Image
def com_port_clicked(self, choice):
self.settings["com_port"] = choice
def baud_rate_clicked(self, choice):
self.settings["baud_rate"] = choice
def open_settings(self):
self.settings_window = SettingsWindow(
self.root, self.settings, self.settings_window_distroy
)
def settings_window_distroy(self):
self.settings = self.settings_window.settings
self.place_holder = self.settings_window.place_holder
st.save_settings(constants.SETTINGS_FILE, self.settings)
self.file_suffix_entry.configure(placeholder_text=self.place_holder)
self.update_lbl_prefix()
def open_serial_port(self):
self.serial_communicator.open_serial_port(
self.settings["com_port"],
int(self.settings["baud_rate"]),
)
def create_logging_thread(self):
self.serial_communicator.stop_thread = False
        | self.serial_thread = threading.Thread(target=self.serial_reader) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zhangzm0128/DVT
# Path: utils/dataloader.py
class CifarLoader:
def __init__(self, config):
self.data_root = os.path.join(config['data_root'], config['cifar_type'])
self.cifar_type = config['cifar_type']
self.valid_scale = config['valid_scale']
self.batch_size = config['batch_size']
self.img_size = config['img_size']
self.norm = config['norm']
self.mp = config['multi_process']
self.num_classes = config['num_classes']
# augmentation
if 'augmentation' in config:
aug = []
aug_config = config['augmentation']
aug += [transforms.RandomHorizontalFlip(),
transforms.RandomCrop(self.img_size, padding=4)]
if 'aug_policy' in aug_config:
if aug_config['aug_policy'] == 'CIFAR':
aug_policy = CIFARPolicy()
aug += [aug_policy]
aug += [transforms.ToTensor(),
transforms.Normalize(self.norm[0], self.norm[1])]
if 'random_erasing' in aug_config:
re_config = aug_config['random_erasing']
re = RandomErasing(re_config['prob'], sh=re_config['sh'], r1=re_config['r1'], mean=self.norm[0])
aug += [re]
train_transform = transforms.Compose(aug)
else:
train_transform = transforms.Compose([
transforms.RandomCrop(self.img_size, padding=4),
transforms.Resize(self.img_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(self.norm[0], self.norm[1]),
])
val_test_transform = transforms.Compose([
transforms.Resize(self.img_size),
transforms.ToTensor(),
transforms.Normalize(self.norm[0], self.norm[1]),
])
if self.cifar_type == 'CIFAR10':
trainset = torchvision.datasets.CIFAR10(root=self.data_root, train=True, download=True, transform=train_transform)
testset = torchvision.datasets.CIFAR10(root=self.data_root, train=False, download=True, transform=val_test_transform)
self.classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
elif self.cifar_type == 'CIFAR100':
trainset = torchvision.datasets.CIFAR100(root=self.data_root, train=True, download=True, transform=train_transform)
testset = torchvision.datasets.CIFAR100(root=self.data_root, train=False, download=True, transform=val_test_transform)
self.classes = ('apple', 'aquarium_fish', 'baby', 'bear', 'beaver',
'bed', 'bee', 'beetle', 'bicycle', 'bottle',
'bowl', 'boy', 'bridge', 'bus', 'butterfly',
'camel', 'can', 'castle', 'caterpillar', 'cattle',
'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach',
'couch', 'cra', 'crocodile', 'cup', 'dinosaur',
'dolphin', 'elephant', 'flatfish', 'forest', 'fox',
'girl', 'hamster', 'house', 'kangaroo', 'keyboard',
'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard',
'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain',
'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid',
'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree',
'plain', 'plate', 'poppy', 'porcupine', 'possum',
'rabbit', 'raccoon', 'ray', 'road', 'rocket',
'rose', 'sea', 'seal', 'shark', 'shrew',
'skunk', 'skyscraper', 'snail', 'snake', 'spider',
'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table',
'tank', 'telephone', 'television', 'tiger', 'tractor',
'train', 'trout', 'tulip', 'turtle', 'wardrobe',
'whale', 'willow_tree', 'wolf', 'woman', 'worm')
if self.valid_scale != 0:
train_size, val_size = len(trainset) * (1 - self.valid_scale), len(trainset) * self.valid_scale
train_, valid_ = torch.utils.data.random_split(trainset, [int(train_size), int(val_size)])
self.trainloader = torch.utils.data.DataLoader(train_, num_workers=self.mp, pin_memory=True,
batch_sampler=RASampler(len(train_), self.batch_size, 1, aug_config['repeat_aug'], shuffle=True, drop_last=True))
self.validloader = torch.utils.data.DataLoader(valid_, batch_size=self.batch_size, shuffle=False, pin_memory=True, num_workers=self.mp)
self.testloader = torch.utils.data.DataLoader(testset, batch_size=self.batch_size, shuffle=False, pin_memory=True, num_workers=self.mp)
else:
self.trainloader = torch.utils.data.DataLoader(trainset, num_workers=self.mp, pin_memory=True,
batch_sampler=RASampler(len(trainset), self.batch_size, 1, aug_config['repeat_aug'], shuffle=True, drop_last=True))
self.validloader = torch.utils.data.DataLoader(testset, batch_size=self.batch_size, shuffle=False, pin_memory=True, num_workers=self.mp)
self.testloader = torch.utils.data.DataLoader(testset, batch_size=self.batch_size, shuffle=False, pin_memory=True, num_workers=self.mp)
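CifarLoader is driven entirely by the config dictionary accessed above. A minimal config sketch covering the keys the constructor reads (the concrete values are illustrative assumptions, not taken from the repository):
data_config = {
    "data_root": "./data",            # cifar_type is appended to this folder
    "cifar_type": "CIFAR10",          # "CIFAR10" or "CIFAR100"
    "valid_scale": 0.1,               # fraction of the training set held out for validation
    "batch_size": 128,
    "img_size": 32,
    "norm": [[0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261]],  # per-channel mean, std
    "multi_process": 4,               # DataLoader num_workers
    "num_classes": 10,
    "augmentation": {                 # enables flip/crop plus the optional extras below
        "aug_policy": "CIFAR",        # applies CIFARPolicy auto-augmentation
        "random_erasing": {"prob": 0.25, "sh": 0.4, "r1": 0.3},
        "repeat_aug": 3,              # repeated augmentations per sample used by RASampler
    },
}
# loader = CifarLoader(data_config)   # downloads CIFAR and builds train/valid/test loaders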
# Path: utils/logger.py
class LoggerWriter:
'''
LoggerWriter implements log writing and model saving.
Inputs: config, checkpoint
- config: the global config for the whole application
- checkpoint: the checkpoint path to load, default is None
'''
def __init__(self, config, checkpoint=None):
self.config = config
self.checkpoint = checkpoint
self.model_save_index = 0
self.last_metric = {}
self.net_name = self.config['network_params']['name']
self.lr_name = self.config['train_params']['learning_rate']
self.loss_name = self.config['train_params']['loss']
self.proj_root = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
self.init_path()
self.set_log_format()
def init_path(self):
'''
Initialize paths from the checkpoint path; if it is None, build a new log directory named from the current time, network name, loss name, and learning rate.
'''
if self.checkpoint is None:
log_root = self.config['log_params']['log_root']
if not os.path.exists(log_root):
raise RuntimeError('Log root directory "{}" does not exist'.format(log_root))
create_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))
self.log_dir = os.path.join(self.config['log_params']['log_root'], '{}_{}_{}_{}'.format(
create_time, self.net_name, self.lr_name, self.loss_name))
os.mkdir(self.log_dir)
self.config_save_path = os.path.join(self.log_dir, 'config')
self.weight_save_path = os.path.join(self.log_dir, 'weight')
self.model_save_path = os.path.join(self.log_dir, 'model')
self.loss_save_path = os.path.join(self.log_dir, 'loss')
os.mkdir(self.config_save_path)
os.mkdir(self.weight_save_path)
os.mkdir(self.model_save_path)
os.mkdir(self.loss_save_path)
save_config_file = open(os.path.join(self.config_save_path, 'config.json'), 'w')
json.dump(self.config, save_config_file, indent=4)
save_config_file.close()
copyfile(os.path.join(self.proj_root, 'network/network.py'), os.path.join(self.model_save_path, 'network.py'))
copyfile(os.path.join(self.proj_root, 'network/loss.py'), os.path.join(self.model_save_path, 'loss.py'))
copyfile(os.path.join(self.proj_root, 'train.py'), os.path.join(self.model_save_path, 'train.py'))
else:
if not os.path.exists(self.checkpoint):
raise RuntimeError('Checkpoint directory "{}" does not exist'.format(self.checkpoint))
self.log_dir = self.checkpoint
self.config_save_path = os.path.join(self.log_dir, 'config')
self.weight_save_path = os.path.join(self.log_dir, 'weight')
self.model_save_path = os.path.join(self.log_dir, 'model')
self.loss_save_path = os.path.join(self.log_dir, 'loss')
def set_log_format(self, log_header=None):
'''
Set the table header of the log file; if log_header is None, use the default format.
'''
if log_header is None:
self.log_header = 'Epoch,Iter,Loss-{},Time\n'.format(self.loss_name)
self.log_format = '{},{},{},{}\n'
else:
self.log_header = log_header
self.log_format = ','.join(['{}']*len(self.log_header.split(',')))+'\n'
def init_logs(self):
'''
Create log file
'''
self.train_log = os.path.join(self.loss_save_path, 'train_loss.csv')
self.valid_log = os.path.join(self.loss_save_path, 'valid_loss.csv')
if not os.path.exists(self.train_log):
with open(self.train_log, 'w') as f:
f.write(self.log_header)
f.close()
if not os.path.exists(self.valid_log):
with open(self.valid_log, 'w') as f:
f.write(self.log_header)
f.close()
def write_train_log(self, args):
with open(self.train_log, 'a') as f:
f.write(self.log_format.format(*args))
f.close()
def write_valid_log(self, args):
with open(self.valid_log, 'a') as f:
f.write(self.log_format.format(*args))
f.close()
def load_model(self, model_name=None, device='cuda'):
'''
Load a saved model using the network definition and weights stored in the checkpoint path
'''
# net = eval(self.net_name)(self.config['network_params'], device) # load model based on network name in config
# load network model from checkpoint file
spec = importlib.util.spec_from_file_location(
'network',
os.path.join(self.checkpoint, 'model', 'network.py')
)
load_network_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(load_network_module)
net = eval('load_network_module.' + self.net_name)(self.config['network_params'], device)
if model_name is not None:
model_path = os.path.join(self.weight_save_path, model_name + '.pkl')
self.model_name = model_name
elif os.path.exists(os.path.join(self.weight_save_path, 'best_model.pkl')):
# if model_name is None, load best_model.pkl as default weight
model_path = os.path.join(self.weight_save_path, 'best_model.pkl')
self.model_name = 'best'
else:
raise RuntimeError('The model "{}" does not exist'.format(model_name))
net.load_state_dict(torch.load(model_path, map_location=torch.device(device)))
return net
def save_model(self, net, metric, mode='min', prefix=None):
'''
Save the model weights.
Parameters:
- net: network <torch.nn.Module>
- metric: the evaluation metric that model saving is based on
- mode: either 'min' or 'max'; with 'min', the smallest metric value is kept as the best model
'''
if prefix is None:
model_name = 'model'
else:
model_name = prefix + '_model'
# torch.save(net.state_dict(), os.path.join(self.weight_save_path, '{}_{}.pkl'.format(model_name, self.model_save_index)))
self.model_save_index += 1
if prefix not in self.last_metric:
torch.save(net.state_dict(), os.path.join(self.weight_save_path, 'best_{}.pkl'.format(model_name)))
self.last_metric[prefix] = metric
torch.save(net.state_dict(), os.path.join(self.weight_save_path, 'best_{}_{}00.pkl'.format(model_name, self.model_save_index // 100)))
else:
if mode == 'min':
if metric < self.last_metric[prefix]:
torch.save(net.state_dict(), os.path.join(self.weight_save_path, 'best_{}.pkl'.format(model_name)))
self.last_metric[prefix] = metric
torch.save(net.state_dict(), os.path.join(self.weight_save_path, 'best_{}_{}00.pkl'.format(model_name, self.model_save_index // 100)))
elif mode == 'max':
if metric > self.last_metric[prefix]:
torch.save(net.state_dict(), os.path.join(self.weight_save_path, 'best_{}.pkl'.format(model_name)))
self.last_metric[prefix] = metric
torch.save(net.state_dict(), os.path.join(self.weight_save_path, 'best_{}_{}00.pkl'.format(model_name, self.model_save_index // 100)))
else:
raise ValueError('Save mode must be in ["max", "min"], error {}'.format(mode))
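A short usage sketch of the logging flow above; it assumes the repository layout (network/network.py, network/loss.py, train.py) and an existing log root, and the config values are placeholders:
config = {
    "network_params": {"name": "DVT"},
    "train_params": {"learning_rate": 1e-3, "loss": "CE"},
    "log_params": {"log_root": "./logs"},
}
logger = LoggerWriter(config)                        # builds <log_root>/<time>_DVT_0.001_CE/...
logger.set_log_format("Epoch,Loss-CE,Acc,Time\n")    # four comma-separated columns
logger.init_logs()                                   # creates train_loss.csv / valid_loss.csv
logger.write_train_log([0, 1.234, 0.56, 12.3])       # one CSV row per call
# logger.save_model(net, metric=0.56, mode="max")    # keeps best_model.pkl when the metric improves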
# Path: train.py
class Train:
def __init__(self, config, logger, net, train_data_loader, valid_data_loader, device):
self.config = config
self.device = device
self.logger = logger
self.net = net
for param_tensor in self.net.state_dict():
print(param_tensor, "\t", self.net.state_dict()[param_tensor].size())
self.train_data_loader = train_data_loader
self.valid_data_loader = valid_data_loader
self.num_classes = config['data_params']['num_classes']
Loss = LossUtils(self.device)
self.loss = Loss(config['train_params']['loss'])
self.lr = config['train_params']['learning_rate']
self.opt = config['train_params']['optimizer']
self.epoch = config['train_params']['epoch']
self.save_mode = config['train_params']['save_mode']
self.metrics = ClassMetrics(self.num_classes)
self.metrics.set_report_metrics(config['train_params']['report_metrics'])
self.report_format = config['train_params']['report_format']
self.no_improve = 0
self.stopper = False
self.best_val_loss = None
self.set_opt()
self.set_scheduler()
def set_opt(self):
if 'opt_args' not in self.config['train_params']:
self.opt = eval('optim.' + self.opt)(self.net.parameters(), lr=self.lr)
# self.opt = eval('optim.' + self.opt)(self.net.parameters(), lr=self.lr)
# self.opt = eval('optim.' + self.opt)(
# [{'params': self.net.parameters(), 'lr': self.lr*10},
# # {'params': self.net.transformer.parameters(), 'lr': self.lr},
# ], lr=self.lr)
else:
self.opt = eval('optim.' + self.opt)(self.net.parameters(), self.lr, **self.config['train_params']['opt_args'])
# self.opt = eval('optim.' + self.opt)(
# [{'params': self.net.net.transformer.parameters(), "lr": self.lr},
# {'params': self.net.net.mlp_head.parameters(), "lr": self.lr-1e-3}
# ], **self.config['train_params']['opt_args'])
def set_scheduler(self):
if 'lr_scheduler' in self.config['train_params'] and self.config['train_params']['lr_scheduler'] != {}:
n_iter_per_epoch = len(self.train_data_loader)
num_steps = int(self.epoch * n_iter_per_epoch)
warmup_steps = int(self.config['train_params']['lr_scheduler']['warmup'] * n_iter_per_epoch)
self.lr_scheduler = CosineAnnealingWarmupRestarts(
self.opt,
first_cycle_steps=num_steps,
cycle_mult=1.,
max_lr = self.lr,
min_lr = 1e-6,
warmup_steps=warmup_steps)
else:
self.lr_scheduler = None
def report_save(self, epoch, train_report, valid_report, step_time, save_metric, mode):
self.logger.save_model(self.net, self.val_acc_1, mode=self.save_mode)
report_data = [epoch]
for x, y in zip(train_report, valid_report):
report_data.append(x)
report_data.append(y)
report_data.append(step_time)
print(self.report_format.format(*report_data))
def train(self):
# Training process
epoch_num_batch = len(self.train_data_loader)
train_loss = []
for current_epoch in range(self.epoch):
step_time = time.time()
self.net.train()
true_report, pred_report = None, None
for idx, (data, labels) in enumerate(self.train_data_loader):
# print(data.shape,labels.shape)
# datas [b, n, h, w], labels (int label) [b]
x_batch = Variable(torch.FloatTensor(data).to(self.device), requires_grad=False)
y_batch = Variable(labels.to(self.device), requires_grad=False)
self.opt.zero_grad()
output = self.net(x_batch) # one_hot pred [b, num_classes]
loss = self.loss(output, y_batch)
loss.backward()
self.opt.step()
if self.lr_scheduler is not None:
self.lr_scheduler.step()
train_loss.append(loss.item())
if true_report is None and pred_report is None:
true_report = labels.cpu().detach().numpy()
pred_report = output.cpu().detach().numpy()
else:
true_report = np.r_[true_report, labels.cpu().detach().numpy()]
pred_report = np.r_[pred_report, output.cpu().detach().numpy()]
all_report = self.metrics.report(true_report, pred_report)
step_time = time.time() - step_time
# acc-1, acc-3, pre, recall, f1, AUC
# print('Train Epoch: {} -- Loss: {:.4} -- Acc-1: {:.0%} -- AUC: {:.0%} -- Time: {}'.format(
# current_epoch, np.mean(train_loss), all_report[0], all_report[5], format_runtime(step_time)))
train_log_data = [current_epoch, np.mean(train_loss)] + all_report + [step_time]
valid_log_data = self.valid(current_epoch)
self.logger.write_train_log(train_log_data)
self.logger.write_valid_log(valid_log_data)
self.report_save(current_epoch,
[train_log_data[1], train_log_data[2], train_log_data[-2]],
[valid_log_data[1], valid_log_data[2], valid_log_data[-2]],
format_runtime(train_log_data[-1] + valid_log_data[-1]),
self.val_acc_1, self.save_mode)
# self.logger.save_model(self.net, self.val_acc_1, mode=self.save_mode)
def valid(self, current_epoch):
valid_loss = []
self.net.eval()
step_time = time.time()
true_report, pred_report = None, None
for idx, (data, labels) in enumerate(self.valid_data_loader):
x_batch = Variable(torch.FloatTensor(data).to(self.device), requires_grad=False)
y_batch = Variable(labels.to(self.device), requires_grad=False)
output = self.net(x_batch)
loss = self.loss(output, y_batch)
valid_loss.append(loss.item())
if true_report is None and pred_report is None:
true_report = labels.cpu().detach().numpy()
pred_report = output.cpu().detach().numpy()
else:
true_report = np.r_[true_report, labels.cpu().detach().numpy()]
pred_report = np.r_[pred_report, output.cpu().detach().numpy()]
all_report = self.metrics.report(true_report, pred_report)
step_time = time.time() - step_time
# print('Valid Epoch: {} -- Loss: {:.4} -- Acc-1: {:.0%} -- AUC: {:.0%} -- Time: {}'.format(
# current_epoch, np.mean(valid_loss), all_report[0], all_report[5], format_runtime(step_time)))
log_data = [current_epoch, np.mean(valid_loss)] + all_report + [step_time]
self.val_acc_1 = all_report[0]
return log_data
# Path: test.py
class Test:
def __init__(self, config, logger, net, test_data_loader, device):
self.config = config
self.device = device
self.logger = logger
self.net = net
# for param_tensor in self.net.state_dict():
# print(param_tensor, "\t", self.net.state_dict()[param_tensor].size())
self.test_data_loader = test_data_loader
self.num_classes = config['data_params']['num_classes']
Loss = LossUtils(self.device)
self.loss = Loss(config['train_params']['loss'])
self.metrics = ClassMetrics(self.num_classes, 'macro')
self.metrics.set_report_metrics(config['train_params']['report_metrics'])
self.report_format = config['train_params']['report_format']
def report_save(self, epoch, train_report, valid_report, step_time, save_metric, mode):
self.logger.save_model(self.net, self.val_acc_1, mode=self.save_mode)
report_data = [epoch]
for x, y in zip(train_report, valid_report):
report_data.append(x)
report_data.append(y)
report_data.append(step_time)
print(self.report_format.format(*report_data))
def test(self):
test_loss = []
self.net.eval()
step_time = time.time()
true_report, pred_report = None, None
for idx, (data, labels) in enumerate(self.test_data_loader):
x_batch = Variable(torch.FloatTensor(data).to(self.device), requires_grad=False)
y_batch = Variable(labels.to(self.device), requires_grad=False)
output = self.net(x_batch)
loss = self.loss(output, y_batch)
test_loss.append(loss.item())
if true_report is None and pred_report is None:
true_report = labels.cpu().detach().numpy()
pred_report = output.cpu().detach().numpy()
else:
true_report = np.r_[true_report, labels.cpu().detach().numpy()]
pred_report = np.r_[pred_report, output.cpu().detach().numpy()]
all_report = self.metrics.report(true_report, pred_report)
step_time = time.time() - step_time
# print('Valid Epoch: {} -- Loss: {:.4} -- Acc-1: {:.0%} -- AUC: {:.0%} -- Time: {}'.format(
# current_epoch, np.mean(valid_loss), all_report[0], all_report[5], format_runtime(step_time)))
# for name, data in zip(self.config['train_params']['report_metrics'], all_report):
# print('{}: {}'.format(name, data))
for name in self.config['train_params']['report_metrics']:
print(name, end='\t')
print('')
for data in all_report:
print('{:.4}'.format(data), end='\t')
print('')
print(true_report.shape)
print(pred_report.shape)
return all_report
# Path: main.py
import os
import json
import argparse
import time
from network.network import *
from utils.dataloader import CifarLoader
from utils.logger import LoggerWriter
from train import Train
from test import Test
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='config.json',
help='the path of global config file.')
parser.add_argument('--checkpoint', type=str, default=None,
help='the path of checkpoint and program will run checkpoint data.')
parser.add_argument('--model_name', type=str, default=None)
parser.add_argument('--mode', type=str, default='train',
help='the mode the app will run in; choose among ["train", "test", "predict"]')
parser.add_argument('--device', type=str, default='cuda',
help='the device the app will run on; choose among ["cuda", "cpu"]')
args = parser.parse_args()
model_name = args.model_name
checkpoint = args.checkpoint
mode = args.mode
device = args.device
if mode == 'train':
config_file = open(args.config, 'r').read()
config = json.loads(config_file)
else:
config_in_checkpoint = os.path.join(checkpoint, 'config', 'config.json')
config_file = open(config_in_checkpoint, 'r').read()
config = json.loads(config_file)
if 'cifar_type' in config['data_params']:
data_loader = CifarLoader(config['data_params'])
if mode == 'train':
logger = LoggerWriter(config, checkpoint)
logger.set_log_format('Epoch,Loss-CE,Acc-k1,Acc-k3,Pre,Recall,F1,AUC,Time\n')
logger.init_logs()
net_name = config['network_params']['name']
net = eval(net_name)(config['network_params'], device)
if config['data_params']['valid_scale'] == 0:
train_data_loader = data_loader.trainloader
valid_data_loader = data_loader.validloader
else:
train_data_loader = data_loader.trainloader
valid_data_loader = data_loader.validloader
trainer = Train(config, logger, net, train_data_loader, valid_data_loader, device)
trainer.train()
| if mode == 'test': |
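One plausible shape for the test branch that follows, based on the Test class and LoggerWriter.load_model shown above; this is a sketch, not necessarily the repository's actual code:
if mode == 'test':
    logger = LoggerWriter(config, checkpoint)        # reuse the checkpoint's config/weight folders
    net = logger.load_model(model_name, device)      # falls back to best_model.pkl when model_name is None
    tester = Test(config, logger, net, data_loader.testloader, device)
    tester.test()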
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: facebookresearch/hgap
# Path: trajectory/utils/relabel_humanoid.py
def get_speed(proprioceptive_obs):
"""
get the speed of the humanoid from proprioceptive_obs.
Note that the speed is a non-negative scalar and does not indicate direction (different from velocity)
"""
current_vel = slice_observable(proprioceptive_obs, 'sensors_velocimeter')
if isinstance(current_vel, tf.Tensor):
speed = tf.norm(current_vel, axis=-1)
elif isinstance(current_vel, np.ndarray):
speed = np.linalg.norm(current_vel, axis=-1)
elif isinstance(current_vel, torch.Tensor):
speed = torch.norm(current_vel, dim=-1)
return speed
# Path: trajectory/utils/relabel_humanoid.py
def get_angular_vel(proprioceptive_obs, direction):
"""
get the angular speed of the humanoid from proprioceptive_obs.
"""
angular_vel = slice_observable(proprioceptive_obs, 'sensors_gyro')
if direction == 'x':
angular_vel = angular_vel[..., 0]
elif direction == 'y':
angular_vel = angular_vel[..., 1]
elif direction == 'z':
angular_vel = angular_vel[..., 2]
return angular_vel
# Path: trajectory/utils/relabel_humanoid.py
def get_body_height(proprioceptive_obs):
"""
get the height of the humanoid from proprioceptive_obs.
"""
return slice_observable(proprioceptive_obs, 'body_height')[..., 0]
# Path: trajectory/utils/relabel_humanoid.py
def get_vel(proprioceptive_obs, direction):
current_vel = slice_observable(proprioceptive_obs, 'sensors_velocimeter')
if direction == 'x':
vel = current_vel[..., 0]
elif direction == 'y':
vel = current_vel[..., 1]
elif direction == 'z':
vel = current_vel[..., 2]
else:
raise ValueError(f"direction {direction} not found")
return vel
# Path: trajectory/utils/relabel_humanoid.py
def get_left_vel(proprioceptive_obs):
"""
get the left (lateral) speed of the humanoid from proprioceptive_obs.
Cancels the velocity component along the world z-axis, using the ego-centric world z-axis observation.
"""
ego_centric_vel = slice_observable(proprioceptive_obs, 'sensors_velocimeter')
world_zaxis = slice_observable(proprioceptive_obs, 'world_zaxis')
left_vel = project_left(ego_centric_vel, world_zaxis)
return left_vel
# Path: trajectory/utils/relabel_humanoid.py
def get_height_vel(proprioceptive_obs):
"""
get the height (vertical) speed of the humanoid from proprioceptive_obs.
"""
ego_centric_vel = slice_observable(proprioceptive_obs, 'sensors_velocimeter')
world_zaxis = slice_observable(proprioceptive_obs, 'world_zaxis')
height_vel = project_height(ego_centric_vel, world_zaxis)
return height_vel
# Path: trajectory/utils/relabel_humanoid.py
def get_forward_vel(proprioceptive_obs):
"""
get the forward speed of the humanoid from proprioceptive_obs.
Cancels the velocity component along the world z-axis, using the ego-centric world z-axis observation.
"""
ego_centric_vel = slice_observable(proprioceptive_obs, 'sensors_velocimeter')
world_zaxis = slice_observable(proprioceptive_obs, 'world_zaxis')
forward_vel = project_forward(ego_centric_vel, world_zaxis)
return forward_vel
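project_forward, project_left and project_height are not shown here; the docstrings above say they cancel the velocity component along the world z-axis expressed in the ego-centric frame. A small numpy sketch of that idea (an assumption about the helpers' behaviour, not their implementation):
import numpy as np

def split_velocity(ego_vel, world_zaxis):
    # world_zaxis is the world "up" direction expressed in the ego-centric frame.
    z = world_zaxis / np.linalg.norm(world_zaxis)
    height_vel = np.dot(ego_vel, z)           # component along world up
    planar_vel = ego_vel - height_vel * z     # velocity with the vertical part cancelled
    return planar_vel, height_vel

planar, height = split_velocity(np.array([1.0, 0.2, -0.5]), np.array([0.0, 0.0, 1.0]))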
# Path: trajectory/tfds/mocap_utils.py
CMU_HUMANOID_OBSERVABLES = (
"walker/actuator_activation",
"walker/appendages_pos",
"walker/body_height",
"walker/end_effectors_pos",
"walker/joints_pos",
"walker/joints_vel",
"walker/sensors_accelerometer",
"walker/sensors_gyro",
"walker/sensors_torque",
"walker/sensors_touch",
"walker/sensors_velocimeter",
"walker/world_zaxis",
)
MEAN_ACTION = "mean_action"
def _get_dataset_keys(h5file):
def visitor(name, item):
def _read_group(dataset_file):
def __init__(self, path: str) -> None:
def h5_file(self):
def n_rsi_rollouts(self):
def n_start_rollouts(self):
def ref_steps(self):
def observable_indices(self):
def stats(self):
def snippet_group_names(self):
def get_snippet_start_metrics(self, snippet_name: str):
def get_snippet_rsi_metrics(self, snippet_name: str):
def get_snippet_early_termination(self, snippet_name: str):
def get_snippet_num_episodes(self, snippet_name: str):
def get_snippet_episode(self, snippet_name: str, episode_id: int):
def get_episode_from_key(self, key):
def keys(self):
def __init__(
self,
pattern: Union[str, Sequence[str]],
metrics_path: str,
observables: Sequence[str] = CMU_HUMANOID_OBSERVABLES,
concat_observations: bool = False,
mean_actions: bool = False,
normalize_observations: bool = False,
normalize_actions: bool = False,
need_to_extract_observables: bool = True,
) -> None:
def _set_element_spec(self, example_episode):
def _set_environment_specs(self, episode) -> None:
def action_spec(self):
def observation_spec(self):
def reward_spec(self):
def discount_spec(self):
def _set_metrics(self, path):
def make_episode_dataset(
self,
shuffle_files: bool = True,
num_parallel_calls: int = tf.data.AUTOTUNE,
) -> tf.data.Dataset:
def episode_generator(path):
def maybe_normalize_steps(episode):
def extract_observations(episode):
def convert_to_rlds_format(episode):
def _pad_last(nest):
def _fused_preprocess_fn(episode):
def __init__(
self,
name: str,
split: str = "train",
observables: Sequence[str] = CMU_HUMANOID_OBSERVABLES,
normalize_observations: bool = False,
normalize_actions: bool = False,
use_mean_actions: bool = False,
concat_observations: bool = True,
tfds_data_dir: Optional[str] = None,
):
def set_mean_std(self):
def make_episode_dataset(self):
def normalize_steps(steps, normalize_observations, normalize_actions):
def choose_actions(steps, use_mean_actions: bool):
def extract_observations(
steps, observables: Sequence[str], concat_observations: bool
):
def preprocess_steps(steps):
def transform_episode(episode_dataset):
def _pad_array_along_axis(x, padded_size, axis=0, value=0):
def _pad_along_axis(nest, padding_size, axis=0, value=0):
def pad_steps(steps, max_len: int, add_mask: bool = True):
class MocapActTrajectoryReader:
class MocapActHDF5DataSource:
class MocapActTFDSDataSource:
# Path: trajectory/datasets/mocapact.py
from torch.utils.data import Dataset
from gym import spaces
from typing import Dict, Sequence, Text, Optional, Union
from stable_baselines3.common.running_mean_std import RunningMeanStd
from trajectory.utils.relabel_humanoid import get_speed, get_angular_vel, get_body_height, get_vel, get_left_vel, get_height_vel, get_forward_vel
from trajectory.tfds import mocap_utils
import bisect
import h5py
import itertools
import collections
import numpy as np
import functools
import torch
import rlds
import tensorflow as tf
import tensorflow_datasets as tfds
import os
self.validation_data_loader = validation_dataset.prefetch(tf.data.AUTOTUNE)
self.validation_data_iterator = self.validation_data_loader.as_numpy_iterator()
self.validation_set = [self.numpy_data_to_torch(validation_batch) for validation_batch in self.validation_data_iterator]
else:
self.validation_set = []
self.checkpoint_path = checkpoint_path
self._checkpoint = tf.train.Checkpoint(train=self.data_loader, validation=self.validation_data_loader)
def pre_process(self, dataset, relabel_type, discount, sequence_length, deterministic, repeat, shuffle_buffer, batch_size,
reward_clip, body_height_limit, body_height_penalty, ignore_incomplete_episodes=False):
denorm_func = self.denormalize_observations if self.normalize_obs else None
dataset = dataset.map(
functools.partial(relabel_reward, relabel_type=relabel_type, denormalize=denorm_func,
reward_clip=reward_clip, body_height_limit=body_height_limit, body_height_penalty=body_height_penalty),
num_parallel_calls=tf.data.AUTOTUNE
)
dataset = dataset.map(
functools.partial(overwrite_value, discount=discount),
num_parallel_calls=tf.data.AUTOTUNE
)
if self.normalize_reward:
dataset = dataset.map(
functools.partial(normalize_reward_return, reward_mean=self.reward_mean, return_mean=self.return_mean,
reward_std=self.reward_std, return_std=self.return_std), num_parallel_calls=tf.data.AUTOTUNE)
dataset: tf.data.Dataset = dataset.interleave(
lambda episode: rlds.transformations.batch(
episode["steps"], size=sequence_length, shift=1, drop_remainder=ignore_incomplete_episodes
),
deterministic=deterministic,
num_parallel_calls=tf.data.AUTOTUNE,
cycle_length=16,
block_length=16
)
dataset = dataset.map(
functools.partial(mocap_utils.pad_steps, max_len=sequence_length)
)
if repeat:
dataset = dataset.repeat()
if shuffle_buffer:
dataset = dataset.shuffle(100000, reshuffle_each_iteration=True, seed=0 if deterministic else None)
dataset = dataset.batch(batch_size, num_parallel_calls=tf.data.AUTOTUNE)
return dataset
@property
def observation_dim(self):
return self.data_loader.element_spec["observation"].shape[-1]
@property
def action_dim(self):
return self.data_loader.element_spec["action"].shape[-1]
def numpy_data_to_torch(self, numpy_data):
observation = torch.tensor(numpy_data["observation"], dtype=torch.float32)
action = torch.tensor(numpy_data["action"], dtype=torch.float32)
reward = torch.tensor(numpy_data["reward"], dtype=torch.float32)[..., None]
value = torch.tensor(numpy_data["value"], dtype=torch.float32)[..., None]
terminal = torch.tensor(numpy_data["is_terminal"], dtype=torch.float32)[..., None]
terminal = 1 - torch.cumprod(1 - terminal, dim=1)
mask = torch.tensor(numpy_data["mask"], dtype=torch.float32)[..., None]
# also mask out if the actions are all zero (hacky way to deal with padding)
mask *= (torch.logical_not(torch.all((action == 0), dim=-1, keepdim=True))).float()
joined = torch.tensor(np.concatenate([observation, action, reward, value], axis=-1), dtype=torch.float32)
return joined, mask, terminal
def __iter__(self):
return self
def __next__(self):
numpy_data = next(self.data_iterator)
joined, mask, terminal = self.numpy_data_to_torch(numpy_data)
return joined, mask, terminal
def _extract_observations(self, all_obs: np.ndarray, observable_keys: Sequence[Text]):
return {k: all_obs[..., self.observable_indices[k]] for k in observable_keys}
def denormalize_reward(self, rewards):
return rewards * self.reward_std + self.reward_mean
def denormalize_return(self, returns):
return returns * self.return_std + self.return_mean
def normalize_observations(self, states):
states_std = np.squeeze(np.array(self.proprio_std))
states_mean = np.squeeze(np.array(self.proprio_mean))
if self.need_to_extract_observables:
states_std = self._extract_observations(states_std, self._observables)
states_mean = self._extract_observations(states_mean, self._observables)
states_std = np.concatenate(list(states_std.values()), axis=-1)
states_mean = np.concatenate(list(states_mean.values()), axis=-1)
if torch.is_tensor(states):
states_std = torch.Tensor(states_std).to(states.device)
states_mean = torch.Tensor(states_mean).to(states.device)
return (states - states_mean) / states_std
def denormalize_observations(self, observations):
states_std = np.squeeze(np.array(self.proprio_std))
states_mean = np.squeeze(np.array(self.proprio_mean))
if self.need_to_extract_observables:
obs_std = self._extract_observations(states_std, self._observables)
obs_mean = self._extract_observations(states_mean, self._observables)
obs_std = np.concatenate(list(obs_std.values()), axis=-1)
obs_mean = np.concatenate(list(obs_mean.values()), axis=-1)
else:
obs_std = states_std
obs_mean = states_mean
if torch.is_tensor(observations):
obs_std = torch.Tensor(obs_std).to(observations.device)
obs_mean = torch.Tensor(obs_mean).to(observations.device)
return observations * obs_std + obs_mean
| def denormalize_states(self, states): |
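A plausible body for denormalize_states, mirroring normalize_observations and denormalize_observations above but applied to the full proprioceptive statistics; this is a hedged sketch, not the repository's confirmed implementation:
def denormalize_states(self, states):
    states_std = np.squeeze(np.array(self.proprio_std))
    states_mean = np.squeeze(np.array(self.proprio_mean))
    if torch.is_tensor(states):
        states_std = torch.Tensor(states_std).to(states.device)
        states_mean = torch.Tensor(states_mean).to(states.device)
    return states * states_std + states_mean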
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Cocon-Se/gpyrobotstxt
# Path: gpyrobotstxt/robots_cc.py
class RobotsMatcher:
def __init__(self):
self._seen_global_agent = False
self._seen_specific_agent = False
self._ever_seen_specific_agent = False
self._seen_separator = False
self._path = None
self._user_agents = None
self._allow = MatchHierarchy()
self._disallow = MatchHierarchy()
self._match_strategy = RobotsMatchStrategy()
def init_user_agents_and_path(self, user_agents: List[str], path: str):
if path[0] != "/":
raise ValueError("Path must begin with '/'")
self._path = path
self._user_agents = user_agents
def extract_user_agent(self, user_agent: str):
# Allowed characters in user-agent are [a-zA-Z_-].
def ascii_is_alpha(c):
return "a" <= c <= "z" or "A" <= c <= "Z"
i = 0
while i < len(user_agent):
c = user_agent[i]
if not ascii_is_alpha(c) and not c == "-" and not c == "_":
break
i += 1
return user_agent[:i]
def extract_user_agent_rfc7231(self, user_agent: str):
# extract_user_agent extracts the matchable part of a user agent string,
# essentially stopping at the first invalid character.
# Example: 'Googlebot/2.1' becomes 'Googlebot'
# Allowed characters in user-agent are [a-zA-Z_-].
#
# Bugfix:
#
# According to RFC 7231, the 'product' part of the user-agent
# is defined as a 'token', which allows:
#
# "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
#
# See https://httpwg.org/specs/rfc7231.html#header.user-agent
def ascii_is_alpha(c):
return "a" <= c <= "z" or "A" <= c <= "Z"
def ascii_is_numeric(c):
return "0" <= c <= "9"
def ascii_is_special(c):
allowed = "~#$%'*+-.^_`|~"
return c in allowed
i = 0
while i < len(user_agent):
c = user_agent[i]
if (
not ascii_is_alpha(c)
and not ascii_is_numeric(c)
and not ascii_is_special(c)
):
break
i += 1
return user_agent[:i]
def seen_any_agent(self):
return self._seen_global_agent or self._seen_specific_agent
def handle_robots_start(self):
# This is a new robots.txt file, so we need to reset all the instance member variables.
# Ref: https://github.com/google/robotstxt/blob/master/robots.cc#L538
self._allow.clear()
self._disallow.clear()
self._seen_global_agent = False
self._seen_specific_agent = False
self._ever_seen_specific_agent = False
self._seen_separator = False
def handle_robots_end(self):
# handle_robots_end is called at the end of parsing the robots.txt file.
# For RobotsMatcher, this does nothing.
pass
def handle_user_agent(self, line_num, user_agent):
# handle_user_agent is called for every "User-Agent:" line in robots.txt.
# Ref: https://github.com/google/robotstxt/blob/master/robots.cc#L567
if self._seen_separator:
self._seen_global_agent = False
self._seen_specific_agent = False
self._seen_separator = False
# Google-specific optimization: a '*' followed by space and more characters
# in a user-agent record is still regarded a global rule.
if (
len(user_agent) >= 1
and user_agent[0] == "*"
and (len(user_agent) == 1 or user_agent[1].isspace())
):
self._seen_global_agent = True
else:
user_agent = self.extract_user_agent(user_agent)
for agent in self._user_agents:
if agent.casefold() == user_agent.casefold():
self._ever_seen_specific_agent = True
self._seen_specific_agent = True
break
def handle_allow(self, line_num, value):
# handle_allow is called for every "Allow:" line in robots.txt.
# Ref: https://github.com/google/robotstxt/blob/master/robots.cc#L589
if not self.seen_any_agent():
return
self._seen_separator = True
priority = self._match_strategy.match_allow(self._path, value)
if priority >= 0:
if self._seen_specific_agent:
if self._allow._specific.priority < priority:
self._allow._specific.set(priority, line_num)
else:
if not self._seen_global_agent:
raise SyntaxError("Not seen global agent")
if self._allow._global.priority < priority:
self._allow._global.set(priority, line_num)
else:
# Google-specific optimization: 'index.htm' and 'index.html' are normalized to '/'
slash_pos = value.rfind("/")
if slash_pos != -1 and value[slash_pos:].startswith("/index.htm"):
new_pattern = value[: slash_pos + 1] + "$"
self.handle_allow(line_num, new_pattern)
def handle_disallow(self, line_num, value):
# handle_disallow is called for every "Disallow:" line in robots.txt.
# Ref: https://github.com/google/robotstxt/blob/master/robots.cc#L622
if not self.seen_any_agent():
return
self._seen_separator = True
priority = self._match_strategy.match_disallow(self._path, value)
if priority >= 0:
if self._seen_specific_agent:
if self._disallow._specific.priority < priority:
self._disallow._specific.set(priority, line_num)
else:
if not self._seen_global_agent:
raise SyntaxError("Not seen global agent")
if self._disallow._global.priority < priority:
self._disallow._global.set(priority, line_num)
def handle_sitemap(self, line_num, value):
# handle_sitemap is called for every "Sitemap:" line in robots.txt.
# For RobotsMatcher, this does nothing.
# Ref: https://github.com/google/robotstxt/blob/master/robots.cc#L650
pass
def handle_unknown_action(self, line_num, action, value):
# handle_unknown_action is called for every unrecognized line in robots.txt.
# For RobotsMatcher, this does nothing.
# Ref: https://github.com/google/robotstxt/blob/master/robots.cc#L652
pass
def disallow(self):
# Ref: https://github.com/google/robotstxt/blob/master/robots.cc#L506
if self._allow._specific.priority > 0 or self._disallow._specific.priority > 0:
return self._disallow._specific.priority > self._allow._specific.priority
if self._ever_seen_specific_agent:
return False
if self._allow._global.priority > 0 or self._disallow._global.priority > 0:
return self._disallow._global.priority > self._allow._global.priority
return False
def allowed_by_robots(self, robots_body, user_agents, url):
# Ref: https://github.com/google/robotstxt/blob/master/robots.cc#L487
try:
urlparse(url)
except:
return False
if isinstance(robots_body, str):
robots_body = robots_body.encode("utf-8")
path: str = get_path_params_query(url)
self.init_user_agents_and_path(user_agents, path)
parser = RobotsTxtParser(robots_body, self)
parser.parse()
return not self.disallow()
def one_agent_allowed_by_robots(self, robots_txt, user_agent, url):
return self.allowed_by_robots(robots_txt, [user_agent], url)
def is_valid_user_agent_to_obey(self, user_agent):
return len(user_agent) > 0 and self.extract_user_agent(user_agent) == user_agent
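A short usage sketch of the matcher above; the robots.txt body, agent name and URLs are illustrative:
robots_txt = (
    "User-Agent: FooBot\n"
    "Disallow: /private/\n"
    "Allow: /private/public.html\n"
)
matcher = RobotsMatcher()
page_ok = matcher.one_agent_allowed_by_robots(robots_txt, "FooBot", "https://example.com/private/public.html")
data_ok = matcher.one_agent_allowed_by_robots(robots_txt, "FooBot", "https://example.com/private/data")
# The longer matching Allow rule outranks the Disallow rule, so page_ok should be True and data_ok False.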
# Path: gpyrobotstxt/robotstxtparser.py
class RobotsTxtParser:
def __init__(self, robots_body, handler):
self._robots_body = robots_body
self._handler = handler
def get_key_and_value_from(self, line: str):
# get_key_and_value_from attempts to parse a line of robots.txt into a key/value pair.
On success, the parsed key and value, and True, are returned.
If parsing is unsuccessful, get_key_and_value_from returns two empty strings and False.
# Remove comments from the current robots.txt line
comment = line.find("#")
if comment != -1:
line = line[:comment]
line = line.strip()
# Rules must match the following pattern:
# <key>[ \t]*:[ \t]*<value>
sep = line.find(":")
if sep == -1:
# Google-specific optimization: some people forget the colon, so we need to
# accept whitespace in its stead.
white = re.compile("|".join([" ", "\t"]))
sep = white.search(line)
if sep is not None:
sep = sep.start()
val = line[sep + 1 :]
if len(val) == 0:
raise SyntaxError("Syntax error in 'robots.txt' file.")
if white.search(val) is not None:
# We only accept whitespace as a separator if there are exactly two
# sequences of non-whitespace characters. If we get here, there were
# more than 2 such sequences since we stripped trailing whitespace above.
return "", "", False
if sep == -1:
return "", "", False # Couldn't find a separator.
key = line[:sep].strip()
if len(key) == 0:
return "", "", False
value = line[sep + 1 :].strip()
return key, value, True
def need_escape_value_for_key(self, key):
key_type = key.type()
if key_type in [
ParsedRobotsKey.KeyType.USER_AGENT,
ParsedRobotsKey.KeyType.SITEMAP,
]:
return False
else:
return True
def maybe_escape_pattern(self, path: str):
need_capitalize = False
num_to_escape = 0
def s(i):
if i < len(path):
return path[i]
return ""
# First, scan the buffer to see if changes are needed. Most don't.
for i in range(len(path)):
# (a) % escape sequence.
if path[i] == "%" and ishexdigit(s(i + 1)) and ishexdigit(s(i + 2)):
if s(i + 1).islower() or s(i + 2).islower():
need_capitalize = True
elif not path[i].isascii():
# (b) needs escaping.
num_to_escape += 1
# (c) Already escaped and escape-characters normalized (eg. %2f -> %2F).
# Return if no changes needed.
if num_to_escape == 0 and not need_capitalize:
return path
dst = io.BytesIO()
i = 0
while (i < len(path)):
if path[i] == '%' and ishexdigit(s(i + 1)) and ishexdigit(s(i + 2)):
# (a) Normalize %-escaped sequence (eg. %2f -> %2F).
dst.write(b'%')
i += 1
dst.write(path[i].upper().encode())
i += 1
dst.write(path[i].upper().encode())
elif not path[i].isascii():
# (b) %-escape octets whose highest bit is set. These are outside the ASCII range
dst.write(b'%')
dst.write(path[i].encode().hex('%').upper().encode())
else:
# (c) Normal character, no modification needed.
dst.write(path[i].encode())
i += 1
return dst.getvalue().decode('utf-8')
def emit_key_value_to_handler(self, line, key, value, handler):
key_type = key.type()
if key_type == ParsedRobotsKey.KeyType.USER_AGENT:
handler.handle_user_agent(line, value)
elif key_type == ParsedRobotsKey.KeyType.ALLOW:
handler.handle_allow(line, value)
elif key_type == ParsedRobotsKey.KeyType.DISALLOW:
handler.handle_disallow(line, value)
elif key_type == ParsedRobotsKey.KeyType.SITEMAP:
handler.handle_sitemap(line, value)
elif key_type == ParsedRobotsKey.KeyType.UNKNOWN:
handler.handle_unknown_action(line, key.unknown_key(), value)
def parse_and_emit_line(self, current_line: int, line: str):
string_key, value, ok = self.get_key_and_value_from(line)
if not ok:
return
key = ParsedRobotsKey()
key.parse(string_key)
if self.need_escape_value_for_key(key):
value = self.maybe_escape_pattern(value)
self.emit_key_value_to_handler(current_line, key, value, self._handler)
def parse(self):
utf_bom = bytes([0xEF, 0xBB, 0xBF])
# Certain browsers limit the URL length to 2083 bytes. In a robots.txt, it's
# fairly safe to assume any valid line isn't going to be more than many times
# that max url length of 2KB. We want some padding for
# UTF-8 encoding/nulls/etc. but a much smaller bound would be okay as well.
# If so, we can ignore the chars on a line past that.
kmax_line_len = 2083 * 8
self._handler.handle_robots_start()
length = len(self._robots_body)
cur = 0
# Skip BOM if present - including partial BOMs.
for i in range(len(utf_bom)):
if cur == length:
break
b = self._robots_body[cur]
if b != utf_bom[i]:
break
cur += 1
line_num = 0
last_was_carriage_return = False
start = cur
end = cur
while True:
if cur == length:
break
b = self._robots_body[cur]
cur += 1
if b != 0x0A and b != 0x0D: # Non-line-ending char case.
# Add to current line, as long as there's room.
if end - start < kmax_line_len - 1:
end += 1
else:
is_CRLF_continuation = (
(end == start) and last_was_carriage_return and (b == 0x0A)
)
if not is_CRLF_continuation:
line_num += 1
self.parse_and_emit_line(line_num, self._robots_body[start:end].decode('utf-8', 'replace'))
start = cur
end = cur
last_was_carriage_return = b == 0x0D
line_num += 1
self.parse_and_emit_line(line_num, self._robots_body[start:end].decode('utf-8', 'replace'))
self._handler.handle_robots_end()
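The parser only drives a handler object: every parsed line is dispatched through the handle_* callbacks used in emit_key_value_to_handler and parse above. A minimal handler sketch that just prints what it sees (illustrative, not part of the package):
class PrintingHandler:
    # Implements the callback interface RobotsTxtParser expects.
    def handle_robots_start(self):
        print("start of robots.txt")
    def handle_robots_end(self):
        print("end of robots.txt")
    def handle_user_agent(self, line_num, value):
        print(line_num, "user-agent:", value)
    def handle_allow(self, line_num, value):
        print(line_num, "allow:", value)
    def handle_disallow(self, line_num, value):
        print(line_num, "disallow:", value)
    def handle_sitemap(self, line_num, value):
        print(line_num, "sitemap:", value)
    def handle_unknown_action(self, line_num, action, value):
        print(line_num, "unknown:", action, value)

parser = RobotsTxtParser(b"User-Agent: *\nDisallow: /tmp\n", PrintingHandler())
parser.parse()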
# Path: test/test_robots_matcher.py
import unittest
from gpyrobotstxt.robots_cc import RobotsMatcher
from gpyrobotstxt.robotstxtparser import RobotsTxtParser
self.digest(line_num)
def handle_sitemap(self, line_num, value):
self.digest(line_num)
self._sitemap += value
def handle_unknown_action(self, line_num, action, value):
self._last_line_seen = line_num
self._unknown_directives += 1
def digest(self, line_num):
assert line_num >= self._last_line_seen
self._last_line_seen = line_num
self._valid_directives += 1
@property
def last_line_seen(self):
return self._last_line_seen
@property
def valid_directives(self):
return self._valid_directives
@property
def unknown_directives(self):
return self._unknown_directives
@property
def sitemap(self):
return self._sitemap
class TestRobotsStatsReporter(unittest.TestCase):
def setUp(self):
self.report = RobotsStatsReporter()
# Different kinds of line endings are all supported: %x0D / %x0A / %x0D.0A
def test_ID_LinesNumbersAreCountedCorrectly(self):
unix_file = (
"User-Agent: foo\n"
"Allow: /some/path\n"
"User-Agent: bar\n"
"\n"
"\n"
"Disallow: /\n"
)
parser = RobotsTxtParser(unix_file.encode("UTF-8"), self.report)
parser.parse()
self.assertEqual(4, self.report.valid_directives)
self.assertEqual(6, self.report.last_line_seen)
dos_file = (
"User-Agent: foo\r\n"
"Allow: /some/path\r\n"
"User-Agent: bar\r\n"
"\r\n"
"\r\n"
"Disallow: /\r\n"
)
parser = RobotsTxtParser(dos_file.encode("UTF-8"), self.report)
parser.parse()
self.assertEqual(4, self.report.valid_directives)
self.assertEqual(6, self.report.last_line_seen)
mac_file = (
"User-Agent: foo\r\n"
"Allow: /some/path\r\n"
"User-Agent: bar\r\n"
"\r\n"
"\r\n"
"Disallow: /\r\n"
)
parser = RobotsTxtParser(mac_file.encode("UTF-8"), self.report)
parser.parse()
self.assertEqual(4, self.report.valid_directives)
self.assertEqual(6, self.report.last_line_seen)
no_final_new_line = (
"User-Agent: foo\n"
"Allow: /some/path\n"
"User-Agent: bar\n"
"\n"
"\n"
"Disallow: /"
)
parser = RobotsTxtParser(no_final_new_line.encode("UTF-8"), self.report)
parser.parse()
self.assertEqual(4, self.report.valid_directives)
self.assertEqual(6, self.report.last_line_seen)
mixed_file = (
"User-Agent: foo\n"
"Allow: /some/path\r\n"
"User-Agent: bar\n"
"\r\n"
"\n"
"Disallow: /"
)
parser = RobotsTxtParser(mixed_file.encode("UTF-8"), self.report)
parser.parse()
self.assertEqual(4, self.report.valid_directives)
self.assertEqual(6, self.report.last_line_seen)
# BOM characters are unparseable and thus skipped. The rules following the line are used.
def test_ID_UTF8ByteOrderMarkIsSkipped(self):
utf8_file_full_BOM = b"\xEF\xBB\xBF" b"User-Agent: foo\n" b"Allow: /AnyValue\n"
parser = RobotsTxtParser(utf8_file_full_BOM, self.report)
parser.parse()
self.assertEqual(2, self.report.valid_directives)
self.assertEqual(0, self.report.unknown_directives)
utf8_file_partial2BOM = b"\xEF\xBB" b"User-Agent: foo\n" b"Allow: /AnyValue\n"
# Partial byte order marks are allowed as well.
parser = RobotsTxtParser(utf8_file_partial2BOM, self.report)
parser.parse()
self.assertEqual(2, self.report.valid_directives)
self.assertEqual(0, self.report.unknown_directives)
utf8_file_partial1BOM = b"\xEF" b"User-Agent: foo\n" b"Allow: /AnyValue\n"
parser = RobotsTxtParser(utf8_file_partial1BOM, self.report)
| parser.parse() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: baiyimeng/LabelCraft
# Path: trainer.py
class Trainer(object):
def __init__(self, flags_obj, cm, dm, new_config=None):
self.cm = cm #context manager
self.dm = dm #dataset manager
self.flags_obj = flags_obj
self.recommender = CM.set_recommender(flags_obj, cm.workspace, dm, new_config)
self.device = CM.set_device(self.flags_obj)
self.recommender.transfer_model(self.device)
self.lr_rec = self.flags_obj.lr_rec
self.lr_label = self.flags_obj.lr_label
self.set_train_dataloader()
self.set_valid_dataloader()
self.set_test_dataloader()
set_device_for_topk(self.device)
self.topk_model = TopK_custom(k=10, epsilon=1e-4).to(self.device)
if self.flags_obj.dataset_name == 'kuaishou':
if self.flags_obj.normalization == 1:
def normalize_play_time(tensor):
mask = tensor <= 50.0
y = torch.zeros_like(tensor)
y[mask] = tensor[mask] / 50.0 * 0.2
if len(mask) < len(tensor):
y[~mask] = torch.min( (tensor[~mask]-50.0) / (800.0-50.0) * 0.8 + 0.2, torch.tensor([1.0]).to(self.device))[0]
return y
def normalize_duration(tensor):
mask = tensor <= 235.0
y = torch.zeros_like(tensor)
y[mask] = tensor[mask] / 235.0 * 0.2
if len(mask) < len(tensor):
y[~mask] = torch.min( (tensor[~mask]-235.0) / (3600.0-235.0) * 0.8 + 0.2, torch.tensor([1.0]).to(self.device))[0]
return y
else:
def normalize_play_time(tensor):
return tensor
def normalize_duration(tensor):
return tensor
elif self.flags_obj.dataset_name == 'wechat':
if self.flags_obj.normalization == 1:
def normalize_play_time(tensor):
mask = tensor <= 51.3
y = torch.zeros_like(tensor)
y[mask] = tensor[mask] / 51.3 * 0.2
if len(mask) < len(tensor):
y[~mask] = torch.min( (tensor[~mask]-51.3) / (8514.0-51.3) * 0.8 + 0.2, torch.tensor([1.0]).to(self.device))[0]
return y
def normalize_duration(tensor):
mask = tensor <= 59.0
y = torch.zeros_like(tensor)
y[mask] = tensor[mask] / 59.0 * 0.2
if len(mask) < len(tensor):
y[~mask] = torch.min( (tensor[~mask]-59.0) / (10275.0-59.0) * 0.8 + 0.2, torch.tensor([1.0]).to(self.device))[0]
return y
else:
def normalize_play_time(tensor):
return tensor
def normalize_duration(tensor):
return tensor
self.normalize_duration = normalize_duration
self.normalize_play_time = normalize_play_time
def set_train_dataloader(self):
self.train_dataset = self.recommender.get_dataset(const_util.train_file, self.dm, True)
self.train_dataloader = get_dataloader(
data_set = self.train_dataset,
bs = self.dm.batch_size,
collate_fn = self.train_dataset.collate_func,
shuffle = True
)
def set_valid_dataloader(self, k=10):
self.valid_dataset = self.recommender.get_dataset(const_util.valid_file, self.dm, False)
self.valid_user_counts = self.valid_dataset.sampler.record.groupby('user_id').size().to_dict()
self.valid_user_counts= {key: value for key, value in self.valid_user_counts.items() if value > k}
self.valid_dataloader = get_dataloader(
data_set = self.valid_dataset,
bs = self.dm.batch_size,
collate_fn = self.valid_dataset.collate_func,
shuffle = False
)
def set_test_dataloader(self, k=10):
self.test_dataset = self.recommender.get_dataset(const_util.test_file, self.dm, False)
self.test_user_counts = self.test_dataset.sampler.record.groupby('user_id').size().to_dict()
self.test_user_counts= {key: value for key, value in self.test_user_counts.items() if value > k}
self.test_dataloader = get_dataloader(
data_set = self.test_dataset,
bs = self.dm.batch_size,
collate_fn = self.test_dataset.collate_func,
shuffle = False
)
def test(self, k=10, user_counts=None, dataset=None):
with torch.no_grad():
model = self.recommender.model
result_test_k = dict()
test_user_num = len(user_counts)
test_user_weight = list(user_counts.values())
temp = sum(test_user_weight)
test_user_weight = [x / temp for x in test_user_weight]
for u in list(user_counts.keys()):
items_info, feedbacks_info, scores = self.get_score_test(u, model, dataset=dataset)
play_time = feedbacks_info[:, 0].float()
duration = feedbacks_info[:, 1].float()
lfc = feedbacks_info[:, 2].float()
if len(play_time) > k:
_, top_indices = torch.topk(scores, k=k)
pos_weight = torch.log2(torch.arange(2, k+2)).to(self.device)
duration_result = torch.std(duration[top_indices]).cpu().numpy()
top_play_time_pos = torch.dot(pos_weight, torch.topk(play_time, k=k)[0])
top_play_time_pos_by_score = torch.dot(pos_weight, play_time[top_indices])
play_time_pos_result = (top_play_time_pos_by_score / top_play_time_pos).cpu().numpy() if top_play_time_pos > 0 else np.array(1.0)
top_lfc_pos = torch.dot(pos_weight, torch.topk(lfc, k=k)[0])
top_lfc_pos_by_score = torch.dot(pos_weight, lfc[top_indices])
lfc_pos_result = (top_lfc_pos_by_score / top_lfc_pos).cpu().numpy() if top_lfc_pos > 0 else np.array(1.0)
result_test_k[u] = [duration_result,
play_time_pos_result,
lfc_pos_result]
temp = [v[0] for v in list(result_test_k.values())]
duration_average_result = np.sum(temp) / test_user_num
duration_weighted_result = sum([test_user_weight[i] * temp[i] for i in range(len(test_user_weight))])
print("top@{} {} duration std:{}".format(k, 'average', duration_average_result))
print("top@{} {} duration std:{}".format(k, 'weighted', duration_weighted_result))
temp = [v[1] for v in list(result_test_k.values())]
play_time_pos_average_result = np.sum(temp) / test_user_num
play_time_pos_weighted_result = sum([test_user_weight[i] * temp[i] for i in range(len(test_user_weight))])
print("top@{} {} play time ratio:{}".format(k, 'average', play_time_pos_average_result))
print("top@{} {} play time ratio:{}".format(k, 'weighted', play_time_pos_weighted_result))
temp = [v[2] for v in list(result_test_k.values())]
lfc_pos_average_result = np.sum(temp) / test_user_num
lfc_pos_weighted_result = sum([test_user_weight[i] * temp[i] for i in range(len(test_user_weight))])
print("top@{} {} lfc ratio:{}".format(k, 'average', lfc_pos_average_result))
print("top@{} {} lfc ratio:{}".format(k, 'weighted', lfc_pos_weighted_result))
result = [duration_average_result, duration_weighted_result,
play_time_pos_average_result, play_time_pos_weighted_result,
lfc_pos_average_result, lfc_pos_weighted_result]
return result
def get_score_test(self, user, model, params_updated=None, dataset=None):
sample = dataset.get_user_batch_final(user)
sample = [[k.to(self.device) for k in i] if type(i) == list else i.to(self.device) for i in sample]
input_data, feedbacks = sample[:-1], sample[3]
scores = model.forward(input_data=input_data, params_updated=params_updated) # shape: [B]
items_info = sample[1]
feedbacks_info = sample[3]
return items_info, feedbacks_info, scores
def train_and_test(self):
train_loss = [] #store every training loss
valid_metric = [] #store every validation metric
not_better = 0
best_metric = 0
save_name = ''
for name_str, name_val in self.recommender.model_config.items():
save_name += name_str + '_' + str(name_val) + '_'
for epoch in range(self.flags_obj.epochs):
loss = self.train_one_epoch(epoch, self.train_dataloader)
print("TRAIN")
print("train loss:{}".format(loss))
train_loss.append(loss)
if epoch % 1 == 0:
print("VALIDATE")
valid_result = self.test(k=10, user_counts=self.valid_user_counts, dataset=self.valid_dataset)
valid_loss = self.get_loss(self.valid_dataloader)
print("valid loss:", valid_loss)
valid_metric.append(valid_result[3])
print("TEST")
test_result = self.test(k=10, user_counts=self.test_user_counts, dataset=self.test_dataset)
test_loss = self.get_loss(self.test_dataloader)
print("test loss:", test_loss)
if valid_result[3] > best_metric + 1e-4:
not_better = 0
best_metric = valid_result[3]
torch.save(self.recommender.model.state_dict(), './workspace/ckpt/rec_' + self.flags_obj.extra_name + '_' + save_name + '.pth')
torch.save(self.recommender.labeler.state_dict(), './workspace/ckpt/labeler_' + self.flags_obj.extra_name + '_' + save_name + '.pth')
else:
not_better += 1
if not_better >= 5:
print("end at epoch {}".format(epoch))
print("train loss", train_loss)
print("valid metric", valid_metric)
break
print('best result:')
self.recommender.model.load_state_dict(torch.load('./workspace/ckpt/rec_' + self.flags_obj.extra_name + '_' + save_name + '.pth'))
self.recommender.labeler.load_state_dict(torch.load('./workspace/ckpt/labeler_' + self.flags_obj.extra_name + '_' + save_name + '.pth'))
valid_result = self.test(k=10, user_counts=self.valid_user_counts, dataset=self.valid_dataset)
test_result = self.test(k=10, user_counts=self.test_user_counts, dataset=self.test_dataset)
def train_one_epoch(self, epoch, dataloader):
self.recommender.model.train()
self.recommender.labeler.train()
loss_train = 0
for step, sample in enumerate(dataloader):
#t0 = time()
sample = [[k.to(self.device) for k in i] if type(i) == list else i.to(self.device) for i in sample]
#t1 = time()
#print("Time: sample to device ", t1-t0)
model_assume = copy.deepcopy(self.recommender.model).to(self.device)
for param in model_assume.parameters():
param.grad = None
input_data, feedbacks = sample[:-1], sample[-1]
labels = self.recommender.labeler(feedbacks).squeeze(-1)
loss_assume = model_assume.forward(input_data, labels)
grads = torch.autograd.grad(loss_assume, model_assume.parameters(), create_graph=True, retain_graph=True)
params_assume_updated = list()
for i, param in enumerate(model_assume.parameters()):
param_updated = param - self.lr_rec * grads[i]
params_assume_updated.append(param_updated)
#t2 = time()
#print("Time: substep1 ", t2-t1)
valid_user_num = len(self.valid_user_counts)
sampled_users = random.sample(list(self.valid_user_counts.keys()), int(valid_user_num * self.flags_obj.sample_ratio))
result = dict()
for u in sampled_users:
result[u] = self.get_score_test(u, model_assume, params_assume_updated, self.valid_dataset)
total_play_time, total_duration, total_lfc= self.get_metric(result=result, k=10)
total_play_time = torch.stack(total_play_time)
total_duration = torch.stack(total_duration)
total_lfc = torch.stack(total_lfc)
metric_play_time, metric_duration, metric_lfc = sum(total_play_time), sum(total_duration), sum(total_lfc)
grads_alpha_play_time = torch.autograd.grad(metric_play_time, self.recommender.labeler.parameters(), retain_graph=True)
grads_alpha_duration = torch.autograd.grad(metric_duration, self.recommender.labeler.parameters(), retain_graph=True)
grads_alpha_lfc = torch.autograd.grad(metric_lfc, self.recommender.labeler.parameters(), retain_graph=True)
grads_alpha_play_time_fc = self.flatten_and_concatenate(grads_alpha_play_time)
grads_alpha_duration_fc = self.flatten_and_concatenate(grads_alpha_duration)
grads_alpha_lfc_fc = self.flatten_and_concatenate(grads_alpha_lfc)
if self.flags_obj.adapt == 0:
weight = torch.tensor([1/3, 1/3, 1/3], dtype=torch.float32)
elif self.flags_obj.disable > 0:
weight = torch.ones(3, dtype=torch.float32)
weight[self.flags_obj.disable - 1] = 0
else:
weight = torch.stack([metric_play_time, metric_duration, metric_lfc])
while torch.min(weight) < 1:
weight = weight * 10
weight = torch.exp(-self.flags_obj.adapt * weight)
weight = weight / torch.sum(weight)
total_metric = metric_play_time * weight[0] + metric_duration * weight[1] + metric_lfc * weight[2]
for i, param in enumerate(self.recommender.labeler.parameters()):
param.data = param.data + self.lr_label * (grads_alpha_play_time[i] * weight[0] + grads_alpha_duration[i] * weight[1] + grads_alpha_lfc[i] * weight[2])
#t3 = time()
#print("Time: substep2 ", t3-t2)
#self.rec_optimizer.zero_grad()
with torch.no_grad():
labels = self.recommender.labeler(feedbacks).squeeze(-1)
loss = self.recommender.model.forward(input_data, labels)
loss_train += loss.detach().cpu().numpy()
grads_final = torch.autograd.grad(loss, self.recommender.model.parameters())
for i, param in enumerate(self.recommender.model.parameters()):
param.data = param.data - self.lr_rec * grads_final[i]
#t4 = time()
#print("Time: substep3 ", t4-t3)
print("epoch:{}, step:{}, loss_assume:{}".format(epoch, step, loss_assume.data))
print("metric_play_time:{}, metric_duration:{}, metric_lfc:{}".format(metric_play_time, metric_duration, metric_lfc))
print("weight:{}, total_metric:{}".format(weight.data.cpu().numpy(), total_metric))
print("loss:{}".format(loss))
print("\n")
return loss_train / step
def get_loss(self, dataloader):
with torch.no_grad():
model = self.recommender.model
labeler = self.recommender.labeler
result = 0
for step, sample in enumerate(dataloader):
sample = [[k.to(self.device) for k in i] if type(i) == list else i.to(self.device) for i in sample]
input_data, feedbacks = sample[:-1], sample[-1]
labels = labeler(feedbacks).squeeze(-1)
loss = model.forward(input_data, labels)
result += loss.detach().cpu().numpy()
return result / step
def flatten_and_concatenate(self, tensor_list):
flat_tensor_list = []
for tensor in tensor_list:
flat_tensor = torch.flatten(tensor)
flat_tensor_list.append(flat_tensor)
concatenated_tensor = torch.cat(flat_tensor_list, dim=0)
return concatenated_tensor
def get_metric(self, result, k=10):
total_play_time = []
total_duration = []
total_lfc = []
for user, info in result.items():
items_info = info[0] # id,duration,tag
feedbacks_info = info[1] # play_time, duration, like follow comment
play_time = feedbacks_info[:, 0].float()
duration = feedbacks_info[:, 1].float()
lfc = feedbacks_info[:, 2].float()
scores = info[-1]
N = items_info.shape[0]
if N < k:
continue
is_topk = self.topk_model(scores.unsqueeze(dim=0))[0]
is_topk = torch.sum(torch.sum(is_topk, dim=0), dim=1)
play_time = self.normalize_play_time(play_time)
duration = self.normalize_duration(duration)
total_play_time.append(torch.dot(is_topk, play_time)/k)
total_duration.append(torch.std(is_topk * duration))
total_lfc.append(torch.dot(is_topk, lfc)/k)
return total_play_time, total_duration, total_lfc
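# Illustrative sketch (not part of the original repository): a standalone view of the
# adaptive metric weighting used in the training step above. The toy values and the
# `adapt` factor below are assumptions chosen only to show the behaviour: metrics are
# rescaled until every entry is at least 1, then weighted with a normalized exp(-adapt * m),
# so the metric with the smaller current value receives the larger weight.
import torch

def _adaptive_weights(metrics, adapt=0.5):
    weight = torch.stack(metrics)
    while torch.min(weight) < 1:
        weight = weight * 10
    weight = torch.exp(-adapt * weight)
    return weight / torch.sum(weight)

# Example: the metric valued 0.2 ends up with the largest of the three weights.
_w = _adaptive_weights([torch.tensor(0.2), torch.tensor(0.5), torch.tensor(0.9)])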
# Path: recommender.py
class Recommender(object):
def __init__(self, flags_obj, workspace, dm, nc=None):
self.dm = dm # dataset manager
self.model_name = flags_obj.model
self.flags_obj = flags_obj
self.load_model_config()
self.set_model()
self.set_labeler()
self.workspace = workspace
def load_model_config(self):
path = './config/{}_{}.yaml'.format(self.model_name, self.dm.dataset_name)
f = open(path)
self.model_config = yaml.load(f, Loader=yaml.FullLoader)
def set_model(self):
self.model = DIN(config=self.model_config)
def set_labeler(self):
self.labeler = Labeler(feedback_num=self.model_config['feedback_num'], dim_num=self.model_config['dim_num'])
def transfer_model(self, device):
self.model = self.model.to(device)
self.labeler = self.labeler.to(device)
def get_dataset(self, *args):
return getattr(data, f'DIN_Dataset')(*args)
# Path: utils/Context.py
import os
import torch
import config.const as const_util
from trainer import Trainer
from recommender import Recommender
class ContextManager(object):
def __init__(self, flags_obj):
self.workspace = flags_obj.workspace
self.set_workspace()
| def set_workspace(self): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: strollby/graphene-directives
# Path: graphene_directives/schema.py
class Schema(GrapheneSchema):
def __init__(
self,
query: graphene.ObjectType = None,
mutation: graphene.ObjectType = None,
subscription: graphene.ObjectType = None,
types: list[graphene.ObjectType] = None,
directives: Union[Collection[GraphQLDirective], None] = None,
auto_camelcase: bool = True,
schema_directives: Collection[SchemaDirective] = None,
include_graphql_spec_directives: bool = True,
):
"""
Schema Definition.
Args:
query (Type[ObjectType]): Root query *ObjectType*. Describes entry point for fields to *read*
data in your Schema.
mutation (Optional[Type[ObjectType]]): Root mutation *ObjectType*. Describes entry point for
fields to *create, update or delete* data in your API.
subscription (Optional[Type[ObjectType]]): Root subscription *ObjectType*. Describes entry point
for fields to receive continuous updates.
types (Optional[Collection[Type[ObjectType]]]): List of any types to include in schema that
may not be introspected through root types.
directives (List[GraphQLDirective], optional): List of custom directives to include in the
GraphQL schema.
auto_camelcase (bool): Fieldnames will be transformed in Schema's TypeMap from snake_case
to camelCase (preferred by GraphQL standard). Default True.
schema_directives (Collection[SchemaDirective]): Directives that can be defined at DIRECTIVE_LOCATION.SCHEMA
with their argument values.
include_graphql_spec_directives (bool): Includes directives defined by GraphQL spec (@include, @skip,
@deprecated, @specifiedBy)
"""
self.custom_directives = directives or []
self.schema_directives = schema_directives or []
self.auto_camelcase = auto_camelcase
self.directives_used: dict[str, GraphQLDirective] = {}
directives = tuple(self.custom_directives) + (
tuple(specified_directives) if include_graphql_spec_directives else ()
)
super().__init__(
query=query,
mutation=mutation,
subscription=subscription,
types=types,
directives=directives,
auto_camelcase=auto_camelcase,
)
def field_name_to_type_attribute(
self, model: graphene.ObjectType
) -> Callable[[str], str]:
"""
Create field name conversion method (from schema name to actual graphene_type attribute name).
Args:
model (ObjectType): model whose field name is to be converted
Returns:
(str) -> (str)
"""
field_names = {}
if self.auto_camelcase:
field_names = {
to_camel_case(attr_name): attr_name
for attr_name in getattr(model._meta, "fields", []) # noqa
}
return lambda schema_field_name: field_names.get(
schema_field_name, schema_field_name
)
def type_attribute_to_field_name(self, attribute: str) -> str:
"""
Create a conversion method to convert from graphene_type attribute name to the schema field name.
"""
if self.auto_camelcase:
return to_camel_case(attribute)
return attribute
def _add_argument_decorators(
self,
entity_name: str,
required_directive_field_types: set[DirectiveLocation],
args: dict[str, GraphQLArgument],
) -> str:
"""
For a given field, go through all its args and see if any directive decorator needs to be added.
"""
if not args:
return ""
# If every arg does not have a description, print them on one line.
print_single_line = not any(arg.description for arg in args.values())
indentation: str = " "
new_args = []
str_field = "(" if print_single_line else "(\n"
for i, (name, arg) in enumerate(args.items()):
if print_single_line:
base_str = f"{print_input_value(name, arg)} "
else:
base_str = (
print_description(arg, f" {indentation}", not i)
+ f" {indentation}"
+ f"{print_input_value(name, arg)} "
)
directives = []
for directive in self.custom_directives:
if has_field_attribute(arg, directive):
directive_values = get_field_attribute_value(arg, directive)
meta_data: CustomDirectiveMeta = getattr(
directive, "_graphene_directive"
)
if (
not required_directive_field_types.intersection(
set(directive.locations)
)
and len(required_directive_field_types) != 0
):
raise DirectiveValidationError(
"\n".join(
[
f"{str(directive)} cannot be used at argument {name} level",
f"\tat {entity_name}",
f"\tallowed: {directive.locations}",
f"\trequired: {required_directive_field_types}",
]
)
)
for directive_value in directive_values:
if meta_data.input_transform is not None:
directive_value = arg_camel_case(
meta_data.input_transform(
arg_snake_case(directive_value), self
)
)
directive_str = decorator_string(directive, **directive_value)
directives.append(directive_str)
new_args.append(base_str + " ".join(directives))
if print_single_line:
str_field += ", ".join(new_args) + ")"
else:
str_field += "\n".join(new_args) + f"\n{indentation})"
return str_field
def _add_field_decorators(self, graphene_types: set, string_schema: str) -> str:
"""
For a given entity, go through all its fields and see if any directive decorator needs to be added.
This method simply goes through the fields that need to be modified and replace them with their annotated
version in the schema string representation.
"""
for graphene_type in graphene_types:
entity_name = graphene_type._meta.name # noqa
entity_type = self.graphql_schema.get_type(entity_name)
get_field_graphene_type = self.field_name_to_type_attribute(graphene_type)
required_directive_locations = set()
if is_object_type(entity_type) or is_interface_type(entity_type):
required_directive_locations.union(
{
DirectiveLocation.FIELD_DEFINITION,
DirectiveLocation.ARGUMENT_DEFINITION,
}
)
elif is_enum_type(entity_type):
required_directive_locations.add(DirectiveLocation.ENUM_VALUE)
elif is_input_type(entity_type):
required_directive_locations.add(
DirectiveLocation.INPUT_FIELD_DEFINITION
)
else:
continue
if is_enum_type(entity_type):
fields: dict = entity_type.values
else:
fields: dict = entity_type.fields
str_fields = []
for field_name, field in fields.items():
if is_enum_type(entity_type):
str_field = enum_type_to_fields_string(
get_single_field_type(
entity_type, field_name, field, is_enum_type=True
)
)
elif isinstance(field, GraphQLInputField):
str_field = input_type_to_fields_string(
get_single_field_type(entity_type, field_name, field)
)
elif isinstance(field, GraphQLField):
str_field = entity_type_to_fields_string(
get_single_field_type(entity_type, field_name, field)
)
# Replace Arguments with directives
if hasattr(entity_type, "_fields"):
_arg = entity_type._fields.args[0] # noqa
if hasattr(_arg, self.type_attribute_to_field_name(field_name)):
arg_field = getattr(
_arg, self.type_attribute_to_field_name(field_name)
)
else:
arg_field = {}
if (
hasattr(arg_field, "args")
and arg_field.args is not None
and isinstance(arg_field.args, dict)
):
original_args = print_args(
args=field.args, indentation=" "
)
replacement_args = self._add_argument_decorators(
entity_name=entity_name,
required_directive_field_types=required_directive_locations,
args=arg_field.args,
)
str_field = str_field.replace(
original_args, replacement_args
)
else:
continue
# Check if we need to annotate the field by checking if it has the decorator attribute set on the field.
field = getattr(
graphene_type, get_field_graphene_type(field_name), None
)
if field is None:
# Append the string, but skip the directives
str_fields.append(str_field)
continue
for directive in self.custom_directives:
if not has_field_attribute(field, directive):
continue
directive_values = get_field_attribute_value(field, directive)
meta_data: CustomDirectiveMeta = getattr(
directive, "_graphene_directive"
)
if (
not required_directive_locations.intersection(
set(directive.locations)
)
and len(required_directive_locations) != 0
):
raise DirectiveValidationError(
"\n".join(
[
f"{str(directive)} cannot be used at field level",
f"\tat {entity_name}",
f"\tallowed: {directive.locations}",
f"\trequired: {required_directive_locations}",
]
)
)
for directive_value in directive_values:
if (
meta_data.field_validator is not None
and not meta_data.field_validator(
entity_type,
field,
arg_snake_case(directive_value),
self,
)
):
raise DirectiveCustomValidationError(
", ".join(
[
f"Custom Validation Failed for {str(directive)} with args: ({directive_value})"
f"at field level {entity_name}:{field}"
]
)
)
if meta_data.input_transform is not None:
directive_value = arg_camel_case(
meta_data.input_transform(
arg_snake_case(directive_value), self
)
)
str_field += (
f" {decorator_string(directive, **directive_value)}"
)
str_fields.append(str_field)
str_fields_annotated = "\n".join(str_fields)
# Replace the original field declaration by the annotated one
if is_object_type(entity_type):
entity_type_name = "type"
str_fields_original = entity_type_to_fields_string(entity_type)
elif is_interface_type(entity_type):
entity_type_name = "interface"
str_fields_original = entity_type_to_fields_string(entity_type)
elif is_enum_type(entity_type):
entity_type_name = "enum"
str_fields_original = enum_type_to_fields_string(entity_type)
elif is_input_type(entity_type):
entity_type_name = "input"
str_fields_original = input_type_to_fields_string(entity_type)
else:
continue
pattern = re.compile(
r"(%s\s%s\s[^\{]*)\{\s*%s\s*\}" # noqa
% (entity_type_name, entity_name, re.escape(str_fields_original))
)
string_schema = pattern.sub(
r"\g<1> {\n%s\n}" % str_fields_annotated, string_schema
)
return string_schema
def add_non_field_decorators(
self, non_fields_type: set[GraphQLNamedType], string_schema: str
) -> str:
for non_field in non_fields_type:
entity_name = non_field._meta.name # noqa
entity_type = self.graphql_schema.get_type(entity_name)
required_directive_locations = set()
if is_scalar_type(entity_type):
non_field_pattern = rf"(scalar {entity_name})"
required_directive_locations.add(DirectiveLocation.SCALAR)
elif is_union_type(entity_type):
non_field_pattern = rf"(union {entity_name} )"
required_directive_locations.add(DirectiveLocation.UNION)
elif is_object_type(entity_type):
non_field_pattern = rf"(type {entity_name} [^\{{]*)"
required_directive_locations.add(DirectiveLocation.OBJECT)
elif is_interface_type(entity_type):
non_field_pattern = rf"(interface {entity_name} [^\{{]*)"
required_directive_locations.add(DirectiveLocation.INTERFACE)
elif is_enum_type(entity_type):
non_field_pattern = rf"(enum {entity_name} [^\{{]*)"
required_directive_locations.add(DirectiveLocation.ENUM)
elif is_input_type(entity_type):
non_field_pattern = rf"(input {entity_name} [^\{{]*)"
required_directive_locations.add(DirectiveLocation.INPUT_OBJECT)
else:
continue
directive_annotations = []
for directive in self.custom_directives:
if has_non_field_attribute(non_field, directive):
meta_data: CustomDirectiveMeta = getattr(
directive, "_graphene_directive"
)
directive_values = get_non_field_attribute_value(
non_field, directive
)
if (
not required_directive_locations.intersection(
set(directive.locations)
)
and len(required_directive_locations) != 0
):
raise DirectiveValidationError(
"\n".join(
[
f"{str(directive)} cannot be used at non field level",
f"\tat {entity_name}",
f"\tallowed: {directive.locations}",
f"\trequired: {required_directive_locations}",
]
)
)
for directive_value in directive_values:
if (
meta_data.non_field_validator is not None
and not meta_data.non_field_validator(
non_field, arg_snake_case(directive_value), self
)
):
raise DirectiveCustomValidationError(
", ".join(
[
f"Custom Validation Failed for {str(directive)} with args: ({directive_value})"
f"at non-field level {entity_name}"
]
)
)
if meta_data.input_transform is not None:
directive_value = arg_camel_case(
meta_data.input_transform(
arg_snake_case(directive_value), self
)
)
directive_annotations.append(
f"{decorator_string(directive, **directive_value)}"
)
annotation = " ".join(directive_annotations)
annotation = (
f" {annotation}" if is_scalar_type(entity_type) else f"{annotation} "
)
replace_str = rf"\1{annotation}"
pattern = re.compile(non_field_pattern)
string_schema = pattern.sub(replace_str, string_schema)
return string_schema
def _get_directive_applied_non_field_types(self) -> set:
"""
Find all the directive applied non-field types from the schema.
"""
directives_types = set()
schema_types = {
**self.graphql_schema.type_map,
**{
"Query": self.graphql_schema.query_type,
"Mutation": self.graphql_schema.mutation_type,
},
}
for schema_type in schema_types.values():
if not hasattr(schema_type, "graphene_type"):
continue
for directive in self.custom_directives:
if has_non_field_attribute(schema_type.graphene_type, directive):
self.directives_used[directive.name] = directive
directives_types.add(schema_type.graphene_type)
return directives_types
def _get_directive_applied_field_types(self) -> set:
"""
Find all the directive applied field types from the schema.
"""
directives_fields = set()
schema_types = {
**self.graphql_schema.type_map,
**{
"Query": self.graphql_schema.query_type, # noqa
"Mutation": self.graphql_schema.mutation_type, # noqa
},
}
for _, entity_type in schema_types.items():
if (
not hasattr(entity_type, "graphene_type") # noqa:SIM101
or isinstance(entity_type.graphene_type._meta, UnionOptions) # noqa
or isinstance(entity_type.graphene_type._meta, ScalarOptions) # noqa
):
continue
fields = (
list(entity_type.values.values()) # Enum class fields
if is_enum_type(entity_type)
else list(entity_type.fields) # noqa
)
for field in fields:
field_type = (
# auto-camelcasing can cause problems
getattr(entity_type.graphene_type, to_camel_case(field), None)
or getattr(entity_type.graphene_type, to_snake_case(field), None)
if not is_enum_type(entity_type)
else field.value
)
for directive_ in self.custom_directives:
if has_field_attribute(field_type, directive_):
self.directives_used[directive_.name] = directive_
directives_fields.add(entity_type.graphene_type)
# Handle Argument Decorators
if (
hasattr(field_type, "args")
and field_type.args is not None
and isinstance(field_type.args, dict)
):
for arg_name, arg_type in field_type.args.items():
if has_field_attribute(arg_type, directive_):
if (
DirectiveLocation.ARGUMENT_DEFINITION
not in directive_.locations
):
raise DirectiveValidationError(
f"{directive_} cannot be used at argument level at {entity_type}->{field}"
)
self.directives_used[directive_.name] = directive_
directives_fields.add(entity_type.graphene_type)
return directives_fields
def get_directives_used(self) -> list[GraphQLDirective]:
"""
Returns a list of directives used in the schema
"""
self._get_directive_applied_field_types()
self._get_directive_applied_non_field_types()
return list(self.directives_used.values())
def __str__(self):
string_schema = ""
string_schema += extend_schema_string(string_schema, self.schema_directives)
string_schema += print_schema(self.graphql_schema)
field_types = self._get_directive_applied_field_types()
non_field_types = self._get_directive_applied_non_field_types()
string_schema = self._add_field_decorators(field_types, string_schema)
string_schema = self.add_non_field_decorators(non_field_types, string_schema)
for directive in self.custom_directives:
meta_data: CustomDirectiveMeta = getattr(directive, "_graphene_directive")
if not meta_data.add_definition_to_schema:
string_schema = string_schema.replace(
print_directive(directive) + "\n\n", ""
)
return string_schema.strip()
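# Minimal construction sketch (illustrative; not taken from the repository's documentation,
# and the _Query type with its `hello` field is made up). The Schema wrapper above is used
# like graphene.Schema; custom directives and schema-level directives are supplied through
# the documented `directives` and `schema_directives` keyword arguments.
import graphene

class _Query(graphene.ObjectType):
    hello = graphene.String()

_schema = Schema(query=_Query)
_sdl = str(_schema)  # SDL string with any directive annotations rendered in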
# Path: graphene_directives/data_models/schema_directive.py
class SchemaDirective:
target_directive: GraphQLDirective
arguments: dict[str, Any]
def __post_init__(self):
if GrapheneDirectiveLocation.SCHEMA not in self.target_directive.locations:
raise DirectiveValidationError(
". ".join(
[
f"{self.target_directive} cannot be used as schema directive",
"Missing DirectiveLocation.SCHEMA in locations",
]
)
)
# Path: graphene_directives/directive.py
def CustomDirective( # noqa
name: str,
locations: Collection[DirectiveLocation],
args: Optional[Dict[str, GraphQLArgument]] = None,
is_repeatable: bool = False,
description: Optional[str] = None,
extensions: Optional[Dict[str, Any]] = None,
ast_node: Optional[ast.DirectiveDefinitionNode] = None,
allow_all_directive_locations: bool = False,
add_definition_to_schema: bool = True,
non_field_validator: Callable[[Any, dict[str, Any], Any], bool] = None,
field_validator: Callable[[Any, Any, dict[str, Any], Any], bool] = None,
input_transform: Callable[[dict[str, Any], Any], dict[str, Any]] = None,
) -> GraphQLDirective:
def directive(
target_directive: GraphQLDirective, *, field: Optional[Any] = None, **_kwargs: Any
) -> Callable:
def decorator(type_: Any) -> Any:
def directive_decorator(target_directive: GraphQLDirective) -> directive:
# Path: graphene_directives/exceptions.py
class DirectiveCustomValidationError(Exception):
def __init__(self, message: str):
super().__init__(message)
# Path: graphene_directives/exceptions.py
class DirectiveValidationError(Exception):
def __init__(self, message: str):
super().__init__(message)
# Path: graphene_directives/parsers.py
def arg_camel_case(inputs: dict) -> dict:
return {to_camel_case(k): v for k, v in inputs.items()}
# Path: graphene_directives/parsers.py
def arg_snake_case(inputs: dict) -> dict:
return {to_snake_case(k): v for k, v in inputs.items()}
# Path: graphene_directives/parsers.py
def decorator_string(directive: GraphQLDirective, **kwargs: dict) -> str:
directive_name = str(directive)
if len(directive.args) == 0:
return directive_name
# Format each keyword argument as a string, considering its type
formatted_args = [
(
f"{to_camel_case(key)}: "
+ (f'"{value}"' if isinstance(value, str) else json.dumps(value))
)
for key, value in kwargs.items()
if value is not None and to_camel_case(key) in directive.args
]
# Construct the directive string
return f"{directive_name}({', '.join(formatted_args)})"
# Path: graphene_directives/parsers.py
def entity_type_to_fields_string(
field: Union[GraphQLObjectType, GraphQLInterfaceType],
) -> str:
return _remove_block(print_fields(field))
# Path: graphene_directives/parsers.py
def enum_type_to_fields_string(graphene_type: GraphQLEnumType) -> str:
fields = print_enum(graphene_type).replace(
print_description(graphene_type) + f"enum {graphene_type.name}", ""
)
return _remove_block(fields)
# Path: graphene_directives/parsers.py
def extend_schema_string(
string_schema: str, schema_directives: Collection[SchemaDirective]
) -> str:
schema_directives_strings = []
for schema_directive in schema_directives:
args = parse_argument_values(
schema_directive.target_directive,
{
to_camel_case(field): value
for (field, value) in schema_directive.arguments.items()
},
)
schema_directives_strings.append(
"\t" + decorator_string(schema_directive.target_directive, **args)
)
if len(schema_directives_strings) != 0:
string_schema += (
"extend schema\n" + "\n".join(schema_directives_strings) + "\n\n"
)
return string_schema
# Path: graphene_directives/parsers.py
def input_type_to_fields_string(graphene_type: GraphQLInputObjectType) -> str:
fields = print_input_object(graphene_type).replace(
print_description(graphene_type) + f"input {graphene_type.name}", ""
)
return _remove_block(fields)
# Path: graphene_directives/utils.py
def get_field_attribute_value(
type_: Any, target_directive: GraphQLDirective
) -> list[dict]:
return getattr(type_, field_attribute_name(target_directive))
# Path: graphene_directives/utils.py
def get_non_field_attribute_value(
type_: Any, target_directive: GraphQLDirective
) -> list[dict]:
return getattr(type_, non_field_attribute_name(target_directive))
# Path: graphene_directives/utils.py
def get_single_field_type(
entity: Union[GraphQLEnumType, GraphQLInputObjectType, GraphQLObjectType],
field_name: str,
field_type: Union[GraphQLInputField, GraphQLField],
is_enum_type: bool = False,
) -> Union[GraphQLEnumType, GraphQLInputObjectType, GraphQLObjectType]:
"""
Generates the schema for a type with just one given field
"""
new_entity = deepcopy(entity)
setattr(
new_entity, "values" if is_enum_type else "fields", {field_name: field_type}
)
return new_entity
# Path: graphene_directives/utils.py
def has_field_attribute(type_: Any, target_directive: GraphQLDirective) -> bool:
return hasattr(type_, field_attribute_name(target_directive))
# Path: graphene_directives/utils.py
def has_non_field_attribute(type_: Any, target_directive: GraphQLDirective) -> bool:
return hasattr(type_, non_field_attribute_name(target_directive))
# Path: graphene_directives/schema.py
import re
import graphene
from typing import Callable, Collection, Union
from graphene import Schema as GrapheneSchema
from graphene.types.scalars import ScalarOptions
from graphene.types.union import UnionOptions
from graphene.utils.str_converters import to_camel_case, to_snake_case
from graphql import (
DirectiveLocation,
GraphQLArgument,
GraphQLDirective,
GraphQLField,
GraphQLInputField,
GraphQLNamedType,
is_enum_type,
is_input_type,
is_interface_type,
is_object_type,
is_scalar_type,
is_union_type,
print_schema,
)
from graphql import specified_directives
from graphql.utilities.print_schema import (
print_args,
print_description,
print_directive,
print_input_value,
)
from .data_models.schema_directive import SchemaDirective
from .directive import CustomDirectiveMeta
from .exceptions import DirectiveCustomValidationError, DirectiveValidationError
from .parsers import (
arg_camel_case,
arg_snake_case,
decorator_string,
entity_type_to_fields_string,
enum_type_to_fields_string,
extend_schema_string,
input_type_to_fields_string,
)
from .utils import (
get_field_attribute_value,
get_non_field_attribute_value,
get_single_field_type,
has_field_attribute,
has_non_field_attribute,
)
class Schema(GrapheneSchema):
def __init__(
self,
query: graphene.ObjectType = None,
mutation: graphene.ObjectType = None,
subscription: graphene.ObjectType = None,
| types: list[graphene.ObjectType] = None, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: weijiawu/CisDQ
# Path: mask2former/modeling/transformer_decoder/position_encoding.py
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x, mask=None):
if mask is None:
mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
def __repr__(self, _repr_indent=4):
head = "Positional encoding " + self.__class__.__name__
body = [
"num_pos_feats: {}".format(self.num_pos_feats),
"temperature: {}".format(self.temperature),
"normalize: {}".format(self.normalize),
"scale: {}".format(self.scale),
]
# _repr_indent = 4
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
# Path: mask2former/modeling/transformer_decoder/transformer.py
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
# Path: mask2former/modeling/transformer_decoder/transformer.py
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
# Path: mask2former/modeling/pixel_decoder/ops/modules/ms_deform_attn.py
class MSDeformAttn(nn.Module):
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation.")
self.im2col_step = 128
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] \
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
try:
output = MSDeformAttnFunction.apply(
value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
except:
# CPU
output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
# # For FLOPs calculation only
# output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
output = self.output_proj(output)
return output
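# Illustrative call (not in the original file) that matches the shape contract documented
# above: two feature levels of spatial sizes 8x8 and 4x4 flattened into one sequence of
# length 80. All tensors here are dummy values.
import torch

_attn = MSDeformAttn(d_model=256, n_levels=2, n_heads=8, n_points=4)
_shapes = torch.as_tensor([[8, 8], [4, 4]], dtype=torch.long)
_start = torch.cat((_shapes.new_zeros((1,)), _shapes.prod(1).cumsum(0)[:-1]))  # [0, 64]
_len = int(_shapes.prod(1).sum())                 # 64 + 16 = 80
_query = torch.rand(2, _len, 256)
_ref = torch.rand(2, _len, 2, 2)                  # normalized (x, y) per level
_out = _attn(_query, _ref, _query, _shapes, _start)   # -> (2, 80, 256)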
# Path: mask2former/modeling/pixel_decoder/msdeformattn.py
import logging
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
from typing import Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.position_encoding import PositionEmbeddingSine
from ..transformer_decoder.transformer import _get_clones, _get_activation_fn
from .ops.modules import MSDeformAttn
num_encoder_layers=6, dim_feedforward=1024, dropout=0.1,
activation="relu",
num_feature_levels=4, enc_n_points=4,
):
super().__init__()
self.d_model = d_model
self.nhead = nhead
encoder_layer = MSDeformAttnTransformerEncoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, enc_n_points)
self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers)
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MSDeformAttn):
m._reset_parameters()
normal_(self.level_embed)
def get_valid_ratio(self, mask):
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def forward(self, srcs, pos_embeds):
masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in srcs]
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# encoder
memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
return memory, spatial_shapes, level_start_index
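# Shape sketch (illustrative, arbitrary values): how the flattening in the forward pass
# above behaves for three feature levels of sizes 32x32, 16x16 and 8x8.
import torch

_srcs = [torch.rand(2, 256, s, s) for s in (32, 16, 8)]
_flat = torch.cat([s.flatten(2).transpose(1, 2) for s in _srcs], 1)
_shapes = torch.as_tensor([(s.shape[2], s.shape[3]) for s in _srcs], dtype=torch.long)
_start = torch.cat((_shapes.new_zeros((1,)), _shapes.prod(1).cumsum(0)[:-1]))
assert _flat.shape == (2, 32 * 32 + 16 * 16 + 8 * 8, 256)  # (B, sum of H_l * W_l, C)
assert _start.tolist() == [0, 1024, 1280]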
class MSDeformAttnTransformerEncoderLayer(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0.1, activation="relu",
n_levels=4, n_heads=8, n_points=4):
super().__init__()
# self attention
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
# self attention
src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
return src
class MSDeformAttnTransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
| ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: KieDani/Towards_3D_Object_Localization
# Path: helper.py
def img_to_cam(coords, Mint, original_size, resized_size):
'''
coords: (..., 3) with ordering (y, x, z)
Mint: (3, 3) or (B, 3, 3)
original_size: order (height, width)
resized_size: order (height, width)
----------------
return: (..., 3) with ordering (y, x, z)
'''
coords_c = coords.clone()
coords_c[..., :2] = resized_to_original_keypopints(coords_c[..., :2], original_size, resized_size)
coords_c[..., [0, 1]] = coords_c[..., [1, 0]] * coords[..., 2:3]
if len(Mint.shape) == 3:
inv_Mint = torch.linalg.inv(Mint[:, :3, :3])
coords_c = torch.einsum('b i d, b ... d -> b ... i', inv_Mint, coords_c)
elif len(Mint.shape) == 2:
inv_Mint = torch.linalg.inv(Mint[:3, :3])
coords_c = torch.einsum('i d, ... d -> ... i', inv_Mint, coords_c)
else:
raise ValueError('Mint should be 2D or 3D tensor')
coords_c[..., [0, 1, 2]] = coords_c[..., [1, 0, 2]]
return coords_c
# Path: helper.py
def cam_to_world(coords_c, extrinsic_matrix):
#coords_c[..., [0, 1]] = coords_c[..., [1, 0]]
tmp = coords_c[..., [1, 0, 2]]
inverse_extrinsic_matrix = torch.linalg.inv(extrinsic_matrix)
#coords_c = torch.cat((coords_c, torch.ones_like(coords_c[..., 0:1])), dim=-1)
tmp = torch.cat((tmp, torch.ones_like(tmp[..., 0:1])), dim=-1)
if len(tmp.shape) == 3: inverse_extrinsic_matrix = inverse_extrinsic_matrix.unsqueeze(-3)
#coords_w = torch.einsum('i d, ... d -> ... i', inverse_extrinsic_matrix, coords_c)
coords_w = torch.einsum('... i d, ... d -> ... i', inverse_extrinsic_matrix, tmp)
coords_w = coords_w[..., :3] / coords_w[..., 3:4]
return coords_w
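# Illustrative call (not from the repository): cam_to_world takes camera-space points in the
# (y, x, z) ordering documented above plus a 4x4 extrinsic matrix and returns world-space
# points with the same batch shape. The identity extrinsic here is an arbitrary placeholder.
import torch

_coords_c = torch.rand(4, 3)       # four points, (y, x, z)
_extrinsic = torch.eye(4)
_coords_w = cam_to_world(_coords_c, _extrinsic)
assert _coords_w.shape == (4, 3)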
# Path: helper.py
def load_models(identifier, epoch, path=None, device='cuda:0'):
if path is None:
assert identifier is None and epoch is None
path = os.path.join(get_logs_path(), 'checkpoints', identifier, f'{epoch}.pth')
checkpoint = torch.load(path, map_location=device)
return checkpoint
# Path: helper.py
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
numpy.random.seed(worker_seed)
random.seed(worker_seed)
# Path: helper.py
def get_logs_path():
return logs_path
# Path: general/model.py
def get_PEN(name='resnet34', depth_mode='depthmap', environment_name=None):
assert name in ['resnet34', 'resnet50', 'convnext', 'hrnet', 'convnextv2']
if name in ['resnet34', 'resnet50', 'convnext', 'hrnet', 'convnextv2']:
model = ResNetPyramid(depth_mode=depth_mode, backbone=name)
else:
if depth_mode != 'depthmap':
raise NotImplementedError
else:
raise ValueError
if environment_name in ['realball']:
model.min_depth = 0.05
model.max_depth = 3
elif environment_name in ['parcour_singleenv_singlecam', 'parcour_singleenv_multicam', 'parcour_multienv_singlecam',
'parcour_multienv_multicam', 'falling', 'carousel', 'parcour_dualenv_multicam']:
model.min_depth = 2
model.max_depth = 12
else:
raise ValueError
return model
# Path: general/config.py
class EvalConfig(BaseConfig):
def __init__(self, sin_title, environment_name, identifier, folder):
super(EvalConfig, self).__init__()
self.sin_title = sin_title
self.environment_name = map_environment_name(environment_name)
self.ident = identifier
self.folder = folder
self.BATCH_SIZE = 1
# Path: general/transforms.py
class Normalize(object):
class Compose(object):
class DeNormalize(object):
def __init__(self, mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]):
def __call__(self, x):
def __init__(self, transforms):
def __call__(self, x):
def __init__(self, mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]):
def __call__(self, x):
# Path: paths.py
# Path: general/evaluate.py
import os
import argparse
import torch
import numpy as np
import einops as eo
import matplotlib.pyplot as plt
import io
import cv2
import random
import json
import general.dataset as my_dataset
from tqdm import tqdm
from helper import img_to_cam, cam_to_world, load_models, seed_worker, get_logs_path
from general.model import get_PEN
from general.config import EvalConfig
from torch.utils.tensorboard import SummaryWriter
from general.transforms import val_transform, denorm
from paths import checkpoint_path as ch_pa
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if imsave: save_image(img, f'img{t:04d}.png', im_path)
heat = np.uint8(heatmaps[t])
heat = eo.rearrange(heat, 'c h w -> h w c')
heat = np.uint8((heat - heatmaps.min()) * (255 / (heatmaps.max() - heatmaps.min())))
heat = cv2.applyColorMap(heat, cv2.COLORMAP_JET)
if imsave: save_image(heat, f'heat{t:04d}.png', im_path)
depth = depthmaps[t]
if len(depth.shape) > 3: #Filter if depth was regression output instead of depth_map
depth = np.zeros_like(heat)
else:
depth = eo.rearrange(depth, 'c h w -> h w c')
depth = np.uint8((depth - depthmaps.min()) * (255 / (depthmaps.max() - depthmaps.min()) + 1e-7))
depth = cv2.applyColorMap(depth, cv2.COLORMAP_JET)
if imsave: save_image(depth, f'depth{t:04d}.png', im_path)
plot2d = np.uint8(plots2d[t])
plot2d = cv2.cvtColor(plot2d, cv2.COLOR_BGRA2BGR)
if imsave: save_image(plot2d, f'plot2d{t:04d}.png', im_path, size=(1024, 1024))
plot2d = cv2.resize(plot2d, (W, H), interpolation=cv2.INTER_AREA)
plot3d_c = np.uint8(plots3d_c[t])
plot3d_c = cv2.cvtColor(plot3d_c, cv2.COLOR_BGRA2BGR)
if imsave: save_image(plot3d_c, f'plot3d_c{t:04d}.png', im_path, size=(1024, 1024))
plot3d_c = cv2.resize(plot3d_c, (W, H), interpolation=cv2.INTER_AREA)
plot3d_w = np.uint8(plots3d_w[t])
plot3d_w = cv2.cvtColor(plot3d_w, cv2.COLOR_BGRA2BGR)
if imsave: save_image(plot3d_w, f'plot3d_w{t:04d}.png', im_path, size=(1024, 1024))
plot3d_w = cv2.resize(plot3d_w, (W, H), interpolation=cv2.INTER_AREA)
frame = np.hstack((np.vstack((img, heat)), np.vstack((depth, plot2d)), np.vstack((plot3d_c, plot3d_w))))
out.write(frame)
out.release()
def save_image(image, name, path, size=(512, 512)):
image = cv2.resize(image, size, interpolation=cv2.INTER_AREA)
cv2.imwrite(os.path.join(path, name), image)
def calc_metrics(checkpoint_path, config, device='cpu'):
torch.manual_seed(42)
random.seed(42)
np.random.seed(42)
g = torch.Generator()
g.manual_seed(0)
num_bins = 3
min_depth, max_depth = (0.05, 1.5) if config.environment_name == 'realball' else (4., 10.)
checkpoint = load_models(None, None, checkpoint_path, device)
coord_model_state_dict = checkpoint['coord_model_state_dict']
coord_model = get_PEN(config.sin_title, environment_name=config.environment_name)
coord_model.load_state_dict(coord_model_state_dict)
coord_model = coord_model.to(device)
coord_model.eval()
testset = my_dataset.get_dataset(config.environment_name, mode='test', transforms=val_transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=config.BATCH_SIZE, shuffle=False, num_workers=8,
worker_init_fn=seed_worker, generator=g)
original_size = testset.original_size
with torch.no_grad():
num_cameras = testloader.sampler.data_source.num_cameras
num_environments = testloader.sampler.data_source.num_environments
with torch.no_grad():
distances = {i:{j:[] for j in range(num_cameras)} for i in range(num_environments)}
number = {i:{j:0 for j in range(num_cameras)} for i in range(num_environments)}
distances_bins = {i:{j:{k:[] for k in range(num_bins)} for j in range(num_cameras)} for i in range(num_environments)}
number_bins = {i:{j:{k:0 for k in range(num_bins)} for j in range(num_cameras)} for i in range(num_environments)}
for i, data_dict in enumerate(tqdm(testloader)):
for xml_num, stuff in data_dict.items():
video, vidlabel, vidheatmap, eM, iM, d3label, cam_num, timestamps = stuff
images_all, d3labels_all = video.to(device), d3label.to(device)
eM, iM = eM.to(device), iM.to(device)
__, T, __, H, W = images_all.shape
images_all = eo.rearrange(images_all, 'b t c h w -> (b t) c h w')
heatmap_all, depthmap_all = coord_model(images_all)
coords_all = coord_model.get_coords3D(heatmap_all, depthmap_all)
coords_all = eo.rearrange(coords_all, '(b t) d -> b t d', t=T)
coords_all = img_to_cam(coords_all, iM, original_size, (H, W))
distance = (coords_all - d3labels_all).double().pow(2).sum(2).sqrt()
for cn in range(cam_num.shape[0]):
cn_ind = cam_num[cn].item()
for t in range(distance.shape[1]):
distances[xml_num][cn_ind].append(distance[cn, t].item())
number[xml_num][cn_ind] += 1
bin_width = (max_depth - min_depth) / num_bins
for b in range(num_bins):
if min_depth + bin_width * b <= d3labels_all[cn, t, 2].item() < min_depth + bin_width * (b + 1):
distances_bins[xml_num][cn_ind][b].append(distance[cn, t].item())
number_bins[xml_num][cn_ind][b] += 1
merge = lambda x: [item for sublist in x for item in sublist]
# mean and std per camera
dtgs_cam = {xml_num:{cn:np.mean(distances[xml_num][cn]) for cn in range(num_cameras)} for xml_num in range(num_environments)}
dtgs_cam_std = {xml_num:{cn:np.std(distances[xml_num][cn]) for cn in range(num_cameras)} for xml_num in range(num_environments)}
dtgs_cam_bin = {xml_num:{cn:[np.mean(distances_bins[xml_num][cn][b]) for b in range(num_bins)] for cn in range(num_cameras)} for xml_num in range(num_environments)}
dtgs_cam_bin_std = {xml_num:{cn:[np.std(distances_bins[xml_num][cn][b]) for b in range(num_bins)] for cn in range(num_cameras)} for xml_num in range(num_environments)}
# mean and std per environment
dtgs_env = {xml_num:np.mean(merge([distances[xml_num][cn] for cn in range(num_cameras)]))
for xml_num in range(num_environments)}
dtgs_env_std = {xml_num:np.std(merge([distances[xml_num][cn] for cn in range(num_cameras)]))
for xml_num in range(num_environments)}
dtgs_env_bin = {xml_num:[np.mean(merge([distances_bins[xml_num][cn][b] for cn in range(num_cameras)]))
for b in range(num_bins)] for xml_num in range(num_environments)}
dtgs_env_bin_std = {xml_num:[np.std(merge([distances_bins[xml_num][cn][b] for cn in range(num_cameras)]))
for b in range(num_bins)] for xml_num in range(num_environments)}
# mean and std for the whole dataset
dtg = np.mean(merge([distances[xml_num][cn] for xml_num in range(num_environments) for cn in range(num_cameras)]))
dtg_std = np.std(merge([distances[xml_num][cn] for xml_num in range(num_environments) for cn in range(num_cameras)]))
| dtg_bin = [np.mean(merge([distances_bins[xml_num][cn][b] for xml_num in range(num_environments) for cn in range(num_cameras)])) for b in range(num_bins)] |
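# Worked example (illustrative, not part of the repository): how the depth bins used in the
# aggregation above are laid out. With the realball range (min_depth=0.05, max_depth=1.5)
# and num_bins=3, bin_width is about 0.483, so a ground-truth depth of 0.9 m satisfies
# min_depth + bin_width * 1 <= 0.9 < min_depth + bin_width * 2 and lands in bin 1.
_min_depth, _max_depth, _num_bins = 0.05, 1.5, 3
_bin_width = (_max_depth - _min_depth) / _num_bins
_bin_index = int((0.9 - _min_depth) // _bin_width)   # -> 1, equivalent to the loop test above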
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: aliyun/pai-python-sdk
# Path: pai/libs/alibabacloud_eas20210701/models.py
class CreateServiceRequest(TeaModel):
def __init__(
self,
develop: str = None,
labels: Dict[str, str] = None,
body: str = None,
):
self.develop = develop
self.labels = labels
self.body = body
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.develop is not None:
result['Develop'] = self.develop
if self.labels is not None:
result['Labels'] = self.labels
if self.body is not None:
result['body'] = self.body
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Develop') is not None:
self.develop = m.get('Develop')
if m.get('Labels') is not None:
self.labels = m.get('Labels')
if m.get('body') is not None:
self.body = m.get('body')
return self
# Path: pai/libs/alibabacloud_eas20210701/models.py
class CreateServiceResponseBody(TeaModel):
def __init__(
self,
internet_endpoint: str = None,
intranet_endpoint: str = None,
region: str = None,
request_id: str = None,
service_id: str = None,
service_name: str = None,
status: str = None,
):
self.internet_endpoint = internet_endpoint
self.intranet_endpoint = intranet_endpoint
self.region = region
self.request_id = request_id
self.service_id = service_id
self.service_name = service_name
self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.internet_endpoint is not None:
result['InternetEndpoint'] = self.internet_endpoint
if self.intranet_endpoint is not None:
result['IntranetEndpoint'] = self.intranet_endpoint
if self.region is not None:
result['Region'] = self.region
if self.request_id is not None:
result['RequestId'] = self.request_id
if self.service_id is not None:
result['ServiceId'] = self.service_id
if self.service_name is not None:
result['ServiceName'] = self.service_name
if self.status is not None:
result['Status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('InternetEndpoint') is not None:
self.internet_endpoint = m.get('InternetEndpoint')
if m.get('IntranetEndpoint') is not None:
self.intranet_endpoint = m.get('IntranetEndpoint')
if m.get('Region') is not None:
self.region = m.get('Region')
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
if m.get('ServiceId') is not None:
self.service_id = m.get('ServiceId')
if m.get('ServiceName') is not None:
self.service_name = m.get('ServiceName')
if m.get('Status') is not None:
self.status = m.get('Status')
return self
# Path: pai/libs/alibabacloud_eas20210701/models.py
class DescribeMachineSpecRequest(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
query: DescribeMachineSpecQuery = None,
):
self.headers = headers
self.query = query
def validate(self):
if self.query:
self.query.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.query is not None:
result['query'] = self.query.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('query') is not None:
temp_model = DescribeMachineSpecQuery()
self.query = temp_model.from_map(m['query'])
return self
# Path: pai/libs/alibabacloud_eas20210701/models.py
class DescribeMachineSpecResponseBody(TeaModel):
def __init__(
self,
request_id: str = None,
instance_metas: List[DescribeMachineSpecResponseBodyInstanceMetas] = None,
types: List[DescribeMachineSpecResponseBodyTypes] = None,
):
self.request_id = request_id
self.instance_metas = instance_metas
self.types = types
def validate(self):
self.validate_required(self.request_id, 'request_id')
self.validate_required(self.instance_metas, 'instance_metas')
if self.instance_metas:
for k in self.instance_metas:
if k:
k.validate()
self.validate_required(self.types, 'types')
if self.types:
for k in self.types:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.request_id is not None:
result['RequestId'] = self.request_id
result['InstanceMetas'] = []
if self.instance_metas is not None:
for k in self.instance_metas:
result['InstanceMetas'].append(k.to_map() if k else None)
result['Types'] = []
if self.types is not None:
for k in self.types:
result['Types'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
self.instance_metas = []
if m.get('InstanceMetas') is not None:
for k in m.get('InstanceMetas'):
temp_model = DescribeMachineSpecResponseBodyInstanceMetas()
self.instance_metas.append(temp_model.from_map(k))
self.types = []
if m.get('Types') is not None:
for k in m.get('Types'):
temp_model = DescribeMachineSpecResponseBodyTypes()
self.types.append(temp_model.from_map(k))
return self
# Path: pai/libs/alibabacloud_eas20210701/models.py
class ListServicesRequest(TeaModel):
def __init__(
self,
filter: str = None,
group_name: str = None,
label: Dict[str, str] = None,
order: str = None,
page_number: int = None,
page_size: int = None,
parent_service_uid: str = None,
service_type: str = None,
sort: str = None,
):
# Keyword search filter.
self.filter = filter
# The group that the service belongs to.
self.group_name = group_name
self.label = label
# Sort order; ascending and descending are supported.
self.order = order
# Page number.
self.page_number = page_number
# Page size.
self.page_size = page_size
# UID of the parent service for Band-type services.
self.parent_service_uid = parent_service_uid
# Service type, e.g. Async, OfflineTask or Standard.
self.service_type = service_type
# Field to sort by.
self.sort = sort
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.filter is not None:
result['Filter'] = self.filter
if self.group_name is not None:
result['GroupName'] = self.group_name
if self.label is not None:
result['Label'] = self.label
if self.order is not None:
result['Order'] = self.order
if self.page_number is not None:
result['PageNumber'] = self.page_number
if self.page_size is not None:
result['PageSize'] = self.page_size
if self.parent_service_uid is not None:
result['ParentServiceUid'] = self.parent_service_uid
if self.service_type is not None:
result['ServiceType'] = self.service_type
if self.sort is not None:
result['Sort'] = self.sort
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Filter') is not None:
self.filter = m.get('Filter')
if m.get('GroupName') is not None:
self.group_name = m.get('GroupName')
if m.get('Label') is not None:
self.label = m.get('Label')
if m.get('Order') is not None:
self.order = m.get('Order')
if m.get('PageNumber') is not None:
self.page_number = m.get('PageNumber')
if m.get('PageSize') is not None:
self.page_size = m.get('PageSize')
if m.get('ParentServiceUid') is not None:
self.parent_service_uid = m.get('ParentServiceUid')
if m.get('ServiceType') is not None:
self.service_type = m.get('ServiceType')
if m.get('Sort') is not None:
self.sort = m.get('Sort')
return self
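# Round-trip sketch (illustrative, not part of the generated client code): request models
# can be built from keyword arguments or rebuilt from a plain dict via from_map(), which
# reads the PascalCase keys shown above.
_req = ListServicesRequest(page_number=1, page_size=50, filter="demo")
_clone = ListServicesRequest().from_map({'PageNumber': 1, 'PageSize': 50, 'Filter': 'demo'})
assert _clone.page_size == _req.page_size == 50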
# Path: pai/libs/alibabacloud_eas20210701/models.py
class ListServicesResponseBody(TeaModel):
def __init__(
self,
page_number: int = None,
page_size: int = None,
request_id: str = None,
services: List[Service] = None,
total_count: int = None,
):
# Page number.
self.page_number = page_number
# Number of services shown per page.
self.page_size = page_size
# Request ID.
self.request_id = request_id
# List of services.
self.services = services
# Total number of services.
self.total_count = total_count
def validate(self):
if self.services:
for k in self.services:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.page_number is not None:
result['PageNumber'] = self.page_number
if self.page_size is not None:
result['PageSize'] = self.page_size
if self.request_id is not None:
result['RequestId'] = self.request_id
result['Services'] = []
if self.services is not None:
for k in self.services:
result['Services'].append(k.to_map() if k else None)
if self.total_count is not None:
result['TotalCount'] = self.total_count
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('PageNumber') is not None:
self.page_number = m.get('PageNumber')
if m.get('PageSize') is not None:
self.page_size = m.get('PageSize')
if m.get('RequestId') is not None:
self.request_id = m.get('RequestId')
self.services = []
if m.get('Services') is not None:
for k in m.get('Services'):
temp_model = Service()
self.services.append(temp_model.from_map(k))
if m.get('TotalCount') is not None:
self.total_count = m.get('TotalCount')
return self
# Path: pai/libs/alibabacloud_eas20210701/models.py
class ReleaseServiceRequest(TeaModel):
def __init__(
self,
traffic_state: str = None,
weight: int = None,
):
self.traffic_state = traffic_state
self.weight = weight
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.traffic_state is not None:
result['TrafficState'] = self.traffic_state
if self.weight is not None:
result['Weight'] = self.weight
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('TrafficState') is not None:
self.traffic_state = m.get('TrafficState')
if m.get('Weight') is not None:
self.weight = m.get('Weight')
return self
# Path: pai/libs/alibabacloud_eas20210701/models.py
class UpdateServiceRequest(TeaModel):
def __init__(
self,
body: str = None,
):
self.body = body
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.body is not None:
result['body'] = self.body
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('body') is not None:
self.body = m.get('body')
return self
# Path: pai/libs/alibabacloud_eas20210701/models.py
class UpdateServiceVersionRequest(TeaModel):
def __init__(
self,
version: int = None,
):
self.version = version
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.version is not None:
result['Version'] = self.version
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('Version') is not None:
self.version = m.get('Version')
return self
# Path: pai/api/base.py
class PaginatedResult(object):
"""A class represent response of a pagination call to PAI service."""
items: List[Union[Dict[str, Any], str]] = None
total_count: int = None
def __init__(self, items: List[Union[Dict[str, Any], str]], total_count: int):
self.items = items
self.total_count = total_count
# Path: pai/api/base.py
class ResourceAPI(with_metaclass(ABCMeta, object)):
"""Class that provide APIs to operate the resource."""
BACKEND_SERVICE_NAME = None
def __init__(
self,
acs_client: Client,
header: Optional[Dict[str, str]] = None,
runtime: Optional[RuntimeOptions] = None,
):
"""Initialize a ResourceAPI object.
Args:
acs_client (Client): A basic client used to communicate with a specific PAI
service.
header (Dict[str, str], optional): Header set in the HTTP request. Defaults
to None.
runtime (RuntimeOptions, optional): Options configured for the client
runtime behavior, such as read_timeout, connection_timeout, etc.
Defaults to None.
"""
self.acs_client = acs_client
self.header = header
self.runtime = runtime
def _make_extra_request_options(self):
"""Returns headers and runtime for client."""
return self.header or dict(), self.runtime or RuntimeOptions()
def _do_request(self, method_: str, *args, **kwargs):
headers, runtime = self._make_extra_request_options()
if "headers" not in kwargs:
kwargs["headers"] = headers
if "runtime" not in kwargs:
kwargs["runtime"] = runtime
request_method = getattr(self.acs_client, method_)
return request_method(*args, **kwargs).body
def get_api_object_by_resource_id(self, resource_id):
raise NotImplementedError
def refresh_entity(self, id_, entity):
"""Refresh entity using API object from service."""
if not isinstance(id_, six.string_types) and not isinstance(
id_, six.integer_types
):
raise ValueError(
"Expected integer type or string type for id, but given type %s"
% type(id_)
)
api_obj = self.get_api_object_by_resource_id(resource_id=id_)
return entity.patch_from_api_object(api_obj)
@classmethod
def make_paginated_result(
cls,
data: Union[Dict[str, Any], TeaModel],
item_key=None,
) -> "PaginatedResult":
"""Make a paginated result from response.
Args:
data: Response data.
item_key: Key under which the item list is stored in the response. If omitted,
the single list-valued field in the response is used.
Returns:
A PaginatedResult holding the item list and the response's 'TotalCount'.
"""
if isinstance(data, TeaModel):
data = data.to_map()
total_count = data.pop("TotalCount")
if item_key:
items = data[item_key]
else:
values = list([val for val in data.values() if isinstance(val, list)])
if len(values) != 1:
raise ValueError("Requires item key to make paginated result.")
items = values[0]
return PaginatedResult(items=items, total_count=total_count)
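# A small illustrative sketch, not from the repository: make_paginated_result only needs
# a dict-shaped response, so it can be exercised without a live PAI client. The
# 'Services' payload below is hypothetical.
from pai.api.base import ResourceAPI  # assumes the pai package is installed

page = ResourceAPI.make_paginated_result(
    {'TotalCount': 2, 'Services': [{'ServiceName': 'svc-a'}, {'ServiceName': 'svc-b'}]}
)
assert page.total_count == 2 and len(page.items) == 2
# Without an explicit item_key the single list-valued field ('Services') is picked up;
# pass item_key='Services' when the response carries more than one list.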
# Path: pai/api/base.py
class ServiceName(object):
# Service provided by PAI.
PAI_DLC = "pai-dlc"
PAI_EAS = "pai-eas"
PAI_WORKSPACE = "aiworkspace"
PAI_STUDIO = "pai"
PAIFLOW = "paiflow"
# Other services provided by Alibaba Cloud.
STS = "sts"
# Path: pai/api/service.py
import json
import logging
import typing
from typing import Any, Dict, Union
from ..libs.alibabacloud_eas20210701.models import (
CreateServiceRequest,
CreateServiceResponseBody,
DescribeMachineSpecRequest,
DescribeMachineSpecResponseBody,
ListServicesRequest,
ListServicesResponseBody,
ReleaseServiceRequest,
UpdateServiceRequest,
UpdateServiceVersionRequest,
)
from .base import PaginatedResult, ResourceAPI, ServiceName
# Copyright 2023 Alibaba, Inc. or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.getLogger(__name__)
class ServiceAPI(ResourceAPI):
BACKEND_SERVICE_NAME = ServiceName.PAI_EAS
_create_method = "create_service_with_options"
_update_method = "update_service_with_options"
_update_version_method = "update_service_version_with_options"
_list_method = "list_services_with_options"
_get_method = "describe_service_with_options"
_start_method = "start_service_with_options"
_stop_method = "stop_service_with_options"
_delete_method = "delete_service_with_options"
_release_method = "release_service_with_options"
_get_group_method = "describe_group_with_options"
_list_groups_method = "list_group_with_options"
_describe_machine_method = "describe_machine_spec_with_options"
def __init__(self, region_id, acs_client, **kwargs):
super(ServiceAPI, self).__init__(acs_client=acs_client, **kwargs)
| self.region_id = region_id |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mpenning/ciscoconfparse2
# Path: ciscoconfparse2/protocol_values.py
ASA_TCP_PORTS = {
"aol": 5190,
"bgp": 179,
"chargen": 19,
"cifs": 3020,
"citrix-ica": 1494,
"cmd": 514,
"ctiqbe": 2748,
"daytime": 13,
"discard": 9,
"domain": 53,
"echo": 7,
"exec": 512,
"finger": 79,
"ftp": 20,
"ftp-data": 21,
"gopher": 70,
"h323": 1720,
"hostname": 101,
"http": 80,
"https": 443,
"ident": 113,
"imap4": 143,
"irc": 194,
"kerberos": 88,
"klogin": 543,
"kshell": 544,
"ldap": 389,
"ldaps": 636,
"login": 513,
"lotusnotes": 1352,
"lpd": 515,
"netbios-ssn": 139,
"nfs": 2049,
"nntp": 119,
"pcanywhere-data": 5631,
"pim-auto-rp": 496,
"pop2": 109,
"pop3": 110,
"pptp": 1723,
"rsh": 514,
"rtsp": 554,
"sip": 5060,
"smtp": 25,
"sqlnet": 1521,
"ssh": 22,
"sunrpc": 111,
"tacacs": 49,
"talk": 517,
"telnet": 23,
"uucp": 540,
"whois": 43,
"www": 80,
}
# Path: ciscoconfparse2/protocol_values.py
ASA_UDP_PORTS = {
"biff": 512,
"bootpc": 68,
"bootps": 67,
"cifs": 3020,
"discard": 9,
"dnsix": 90,
"domain": 53,
"echo": 7,
"http": 80,
"isakmp": 500,
"kerberos": 88,
"mobile-ip": 434,
"nameserver": 42,
"netbios-dgm": 138,
"netbios-ns": 137,
"nfs": 2049,
"ntp": 123,
"pcanywhere-status": 5632,
"pim-auto-rp": 496,
"radius": 1812,
"radius-acct": 1813,
"rip": 520,
"rtsp": 5004,
"secureid-udp": 5500,
"sip": 5060,
"snmp": 161,
"snmptrap": 162,
"sunrpc": 111,
"syslog": 514,
"tacacs": 49,
"talk": 517,
"tftp": 69,
"time": 37,
"who": 513,
"www": 80,
"xdmcp": 177,
}
# Path: ciscoconfparse2/errors.py
class DuplicateMember(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class InvalidParameters(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class InvalidMember(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class MismatchedType(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class UntypedError(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class InvalidShellVariableMapping(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class NoRegexMatch(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class PythonOptimizeException(BaseError):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class DynamicAddressException(Exception):
"""Throw this if you try to get an address object from a dhcp interface"""
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class ListItemMissingAttribute(Exception):
"""Raise this error if a list() item is missing a required attribute."""
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class UnexpectedType(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class InvalidCiscoInterface(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class InvalidCiscoRange(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class DNSTimeoutError(Exception):
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/errors.py
class RequirementFailure(BaseError):
"""Raise this error instead of using assert foo in non-test code"""
def __init__(self, msg=""):
super().__init__(msg)
self.msg = msg
# Path: ciscoconfparse2/ccp_util.py
from typing import Optional, Callable, Any, Union, List
from operator import attrgetter
from functools import wraps
from collections.abc import Sequence
from collections import UserList
from ipaddress import IPv4Network, IPv6Network, IPv4Address, IPv6Address
from ipaddress import collapse_addresses as ipaddr_collapse_addresses
from ipaddress import AddressValueError
from macaddress import EUI48, EUI64
from dns.exception import DNSException
from dns.resolver import Resolver
from dns import reversename, query, zone
from deprecated import deprecated
from loguru import logger
from ciscoconfparse2.protocol_values import ASA_TCP_PORTS, ASA_UDP_PORTS
from ciscoconfparse2.errors import DuplicateMember, InvalidParameters, InvalidMember, MismatchedType, UntypedError
from ciscoconfparse2.errors import InvalidShellVariableMapping
from ciscoconfparse2.errors import NoRegexMatch
from ciscoconfparse2.errors import PythonOptimizeException
from ciscoconfparse2.errors import DynamicAddressException
from ciscoconfparse2.errors import ListItemMissingAttribute
from ciscoconfparse2.errors import UnexpectedType
from ciscoconfparse2.errors import InvalidCiscoInterface
from ciscoconfparse2.errors import InvalidCiscoRange
from ciscoconfparse2.errors import DNSTimeoutError
from ciscoconfparse2.errors import RequirementFailure
import subprocess
import locale
import socket
import shlex
import time
import copy
import sys
import re
import os
import attrs
import ciscoconfparse2
get_regex : str
Returns the regex string used for an IPv6 Address
netmask : :class:`ipaddress.IPv6Address`
An :class:`ipaddress.IPv6Address` object containing the netmask
network_offset : int
Returns the integer difference between host number and network number. This must be less than `numhosts`
numhosts : int
An integer representing the number of host addresses contained in the network
prefixlen : int
An integer representing the length of the netmask
broadcast: raises `NotImplementedError`; IPv6 doesn't use broadcast addresses
hostmask : :class:`ipaddress.IPv6Address`
An :class:`ipaddress.IPv6Address` representing the hostmask
numhosts : int
An integer representing the number of hosts contained in the network
"""
if isinstance(debug, int):
if debug > 0:
logger.info(f"IPv6Obj(v6input='{v6input}', strict={strict}, debug={debug}) was called")
else:
error = f"IPv6Obj() debug must be an int, but `debug`=`{debug}` was called."
logger.critical(error)
raise ValueError(error)
if v6input is not None:
try:
if isinstance(v6input, (str, int, IPv6Obj)) is False:
raise ValueError()
except ValueError as eee:
error = f"Could not parse '{v6input}' (type: {type(v6input)}) into an IPv6 Address. {eee}"
logger.error(error)
raise AddressValueError(error)
except BaseException as eee:
error = f"Could not parse '{v6input}' (type: {type(v6input)}) into an IPv6 Address. {eee}"
logger.error(error)
raise AddressValueError(error)
# Initialize attributes
self.ip_object = None
self.network_object = None
self.finished_parsing = False
self.v6input = v6input
self.dna = "IPv6Obj"
self.strict = strict
self.debug = debug
self.empty = False
self.__setstate__ = None
if v6input is None:
self.empty = True
elif isinstance(v6input, str):
if len(v6input) > IPV6_MAXSTR_LEN:
raise RequirementFailure()
tmp = re.split(r"\s+", v6input.strip())
if len(tmp) == 2:
v6input = "/".join(tmp)
elif len(tmp) == 1:
v6input = tmp[0]
else:
raise NotImplementedError(v6input.strip())
v6_str_rgx = _RGX_IPV6ADDR.search(v6input.strip())
# Example 'v6_groupdict'
# v6_groupdict = {'addr': '2b00:cd80:14:10::1', 'opt1': None, 'opt2': None, 'opt3': None, 'opt4': None, 'opt5': '2b00:cd80:14:10::1', 'opt6': None, 'opt7': None, 'opt8': None, 'opt9': None, 'opt10': None, 'masklen': '64'}
v6_groupdict = v6_str_rgx.groupdict()
for key in ["addr", "opt1", "opt2", "opt3", "opt4", "opt5", "opt6", "opt7", "opt8", "opt9", "opt10", "opt11"]:
_ipv6 = v6_groupdict[key]
if _ipv6 is not None:
break
else:
_ipv6 = "::1"
if _ipv6 is None:
raise RequirementFailure()
self.ip_object = IPv6Address(_ipv6)
if isinstance(v6_groupdict["masklen"], str):
netstr = _ipv6 + "/" + v6_groupdict["masklen"]
# FIXME - this probably should be removed...
#elif isinstance(v6_groupdict["netmask"], str):
# netstr = ipv6 + "/" + v6_groupdict["netmask"]
else:
netstr = _ipv6 + "/128"
self.network_object = IPv6Network(netstr, strict=False)
elif isinstance(v6input, int):
if not (0 <= v6input <= IPV6_MAXINT):
raise RequirementFailure()
self.ip_object = IPv6Address(v6input)
self.network_object = IPv6Network(v6input, strict=False)
elif isinstance(v6input, IPv6Obj):
self.ip_object = IPv6Address(v6input.ip)
self.network_object = IPv6Network(v6input.as_cidr_net, strict=False)
else:
raise AddressValueError(f"Could not parse '{v6input}' {type(v6input)} into an IPv6 Address")
# On IPv6Obj()
def __repr__(self):
# Detect IPv4_mapped IPv6 addresses...
if self.empty is True:
return f"""<IPv6Obj None empty={self.empty}>"""
elif self.is_ipv4_mapped:
return f"""<IPv6Obj ::ffff:{self.ip.ipv4_mapped}/{self.prefixlen}>"""
else:
return f"""<IPv6Obj {str(self.ip)}/{self.prefixlen}>"""
# On IPv6Obj()
def __eq__(self, val):
if self.empty is True:
if val.empty is True:
return True
else:
return False
| try: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xraychen/OFA-Wav2Vec2
# Path: fairseq/data/fairseq_dataset.py
class FairseqDataset(torch.utils.data.Dataset, EpochListening):
"""A dataset that provides helpers for batching."""
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
raise NotImplementedError
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
raise NotImplementedError
def num_tokens_vec(self, indices):
"""Return the number of tokens for a set of positions defined by indices.
This value is used to enforce ``--max-tokens`` during batching."""
raise NotImplementedError
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
raise NotImplementedError
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self), dtype=np.int64)
@property
def supports_prefetch(self):
"""Whether this dataset supports prefetching."""
return False
def attr(self, attr: str, index: int):
return getattr(self, attr, None)
def prefetch(self, indices):
"""Prefetch the data required for this epoch."""
raise NotImplementedError
def get_batch_shapes(self):
"""
Return a list of valid batch shapes, for example::
[(8, 512), (16, 256), (32, 128)]
The first dimension of each tuple is the batch size and can be ``None``
to automatically infer the max batch size based on ``--max-tokens``.
The second dimension of each tuple is the max supported length as given
by :func:`fairseq.data.FairseqDataset.num_tokens`.
This will be used by :func:`fairseq.data.FairseqDataset.batch_by_size`
to restrict batch shapes. This is useful on TPUs to avoid too many
dynamic shapes (and recompilations).
"""
return None
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
"""
Given an ordered set of indices, return batches according to
*max_tokens*, *max_sentences* and *required_batch_size_multiple*.
"""
from fairseq.data import data_utils
fixed_shapes = self.get_batch_shapes()
if fixed_shapes is not None:
def adjust_bsz(bsz, num_tokens):
if bsz is None:
assert max_tokens is not None, "Must specify --max-tokens"
bsz = max_tokens // num_tokens
if max_sentences is not None:
bsz = min(bsz, max_sentences)
elif (
bsz >= required_batch_size_multiple
and bsz % required_batch_size_multiple != 0
):
bsz -= bsz % required_batch_size_multiple
return bsz
fixed_shapes = np.array(
[
[adjust_bsz(bsz, num_tokens), num_tokens]
for (bsz, num_tokens) in fixed_shapes
]
)
try:
num_tokens_vec = self.num_tokens_vec(indices).astype("int64")
except NotImplementedError:
num_tokens_vec = None
return data_utils.batch_by_size(
indices,
num_tokens_fn=self.num_tokens,
num_tokens_vec=num_tokens_vec,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
fixed_shapes=fixed_shapes,
)
def filter_indices_by_size(self, indices, max_sizes):
"""
Filter a list of sample indices. Remove those that are longer than
specified in *max_sizes*.
WARNING: don't update, override method in child classes
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
if isinstance(max_sizes, float) or isinstance(max_sizes, int):
if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray):
ignored = indices[self.sizes[indices] > max_sizes].tolist()
indices = indices[self.sizes[indices] <= max_sizes]
elif (
hasattr(self, "sizes")
and isinstance(self.sizes, list)
and len(self.sizes) == 1
):
ignored = indices[self.sizes[0][indices] > max_sizes].tolist()
indices = indices[self.sizes[0][indices] <= max_sizes]
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
else:
indices, ignored = data_utils._filter_by_size_dynamic(
indices, self.size, max_sizes
)
return indices, ignored
@property
def supports_fetch_outside_dataloader(self):
"""Whether this dataset supports fetching outside the workers of the dataloader."""
return True
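# An illustrative subclass sketch, not from fairseq itself: it only fills in the hooks
# needed to exercise ordered_indices() and filter_indices_by_size(), which rely on the
# per-sample `sizes` array checked above. Assumes fairseq (and therefore numpy/torch)
# is installed.
import numpy as np
from fairseq.data import FairseqDataset

class ToySizedDataset(FairseqDataset):
    def __init__(self, sizes):
        self.sizes = np.array(sizes, dtype=np.int64)

    def __getitem__(self, index):
        return np.zeros(int(self.sizes[index]))

    def __len__(self):
        return len(self.sizes)

    def num_tokens(self, index):
        return int(self.sizes[index])

    def size(self, index):
        return int(self.sizes[index])

ds = ToySizedDataset([5, 12, 7, 30])
kept, dropped = ds.filter_indices_by_size(ds.ordered_indices(), max_sizes=10)
# kept == array([0, 2]); dropped == [1, 3] (the samples longer than 10 tokens)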
# Path: fairseq/data/data_utils.py
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
require_same_masks: bool = True,
mask_dropout: float = 0.0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
poisson = sample from Poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
require_same_masks: if true, will randomly drop out masks until same amount of masks remains in each sample
mask_dropout: randomly dropout this percentage of masks in each example
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
int,  # np.int was removed from NumPy; the builtin int keeps the original dtype intent
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len and require_same_masks:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
if mask_dropout > 0:
num_holes = np.rint(len(mask_idc) * mask_dropout).astype(int)
mask_idc = np.random.choice(
mask_idc, len(mask_idc) - num_holes, replace=False
)
mask[i, mask_idc] = True
return mask
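# A quick usage sketch, not part of the original file: mask roughly 65% of 100 timesteps
# for a batch of 2, in spans of length 10, with no padding. Assumes fairseq is installed.
import numpy as np
from fairseq.data.data_utils import compute_mask_indices

np.random.seed(0)
mask = compute_mask_indices(shape=(2, 100), padding_mask=None, mask_prob=0.65, mask_length=10)
assert mask.shape == (2, 100) and mask.dtype == bool
# require_same_masks defaults to True, so both rows end up with the same number of True entries.
assert mask[0].sum() == mask[1].sum()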
# Path: fairseq/data/data_utils.py
def get_buckets(sizes, num_buckets):
buckets = np.unique(
np.percentile(
sizes,
np.linspace(0, 100, num_buckets + 1),
interpolation="lower",
)[1:]
)
return buckets
# Path: fairseq/data/data_utils.py
def get_bucketed_sizes(orig_sizes, buckets):
sizes = np.copy(orig_sizes)
assert np.min(sizes) >= 0
start_val = -1
for end_val in buckets:
mask = (sizes > start_val) & (sizes <= end_val)
sizes[mask] = end_val
start_val = end_val
return sizes
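# A tiny illustration, not from the repository: derive 3 length buckets and snap every
# sample length up to its bucket boundary. Assumes fairseq is installed.
import numpy as np
from fairseq.data.data_utils import get_buckets, get_bucketed_sizes

sizes = np.array([10, 12, 20, 40, 41, 80])
buckets = get_buckets(sizes, num_buckets=3)          # e.g. array([12, 40, 80])
bucketed = get_bucketed_sizes(sizes, buckets)        # e.g. array([12, 12, 40, 40, 80, 80])
assert np.all(bucketed >= sizes) and len(np.unique(bucketed)) <= len(buckets)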
# Path: fairseq/data/audio/audio_utils.py
def parse_path(path: str) -> Tuple[str, List[int]]:
"""Parse data path which is either a path to
1. a .npy/.wav/.flac/.ogg file
2. a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]"
Args:
path (str): the data path to parse
Returns:
file_path (str): the file path
slice_ptr (list of int): empty in case 1;
byte offset and length for the slice in case 2
"""
if Path(path).suffix in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS:
_path, slice_ptr = path, []
else:
_path, *slice_ptr = path.split(":")
if not Path(_path).is_file():
raise FileNotFoundError(f"File not found: {_path}")
assert len(slice_ptr) in {0, 2}, f"Invalid path: {path}"
slice_ptr = [int(i) for i in slice_ptr]
return _path, slice_ptr
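# A short sketch, not in the original file, of the two path forms parse_path accepts.
# A real temporary .wav-named file is created because parse_path verifies existence for
# the zip-style form; POSIX paths are assumed (no drive-letter colons).
import os
import tempfile
from fairseq.data.audio.audio_utils import parse_path  # assumes fairseq is installed

with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
    plain = tmp.name
assert parse_path(plain) == (plain, [])                           # case 1: plain audio/feature file
assert parse_path(f"{plain}:1024:2048") == (plain, [1024, 2048])  # case 2: "[zip_path]:[offset]:[length]"
os.remove(plain)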
# Path: fairseq/data/audio/audio_utils.py
def read_from_stored_zip(zip_path: str, offset: int, length: int) -> bytes:
return mmap_read(zip_path, offset, length)
# Path: fairseq/data/audio/audio_utils.py
def is_sf_audio_data(data: bytes) -> bool:
is_wav = data[0] == 82 and data[1] == 73 and data[2] == 70
is_flac = data[0] == 102 and data[1] == 76 and data[2] == 97
is_ogg = data[0] == 79 and data[1] == 103 and data[2] == 103
return is_wav or is_flac or is_ogg
# Path: fairseq/data/text_compressor.py
class TextCompressor(object):
def __init__(
self, level: TextCompressionLevel, max_input_byte_length: int = 2**16
):
self.level = level
self.max_input_length = max_input_byte_length
def compress(self, text: str) -> bytes:
if self.level == TextCompressionLevel.low:
import zlib
# zlib: built-in, fast
return zlib.compress(text.encode(), level=0)
elif self.level == TextCompressionLevel.high:
try:
import unishox2
# unishox2: optimized for short text but slower
except ImportError:
raise ImportError(
"Please install unishox2 for the text compression feature: "
"pip install unishox2-py3"
)
assert len(text.encode()) <= self.max_input_length
return unishox2.compress(text)[0]
else:
return text.encode()
def decompress(self, compressed: bytes) -> str:
if self.level == TextCompressionLevel.low:
import zlib
return zlib.decompress(compressed).decode()
elif self.level == TextCompressionLevel.high:
try:
import unishox2
except ImportError:
raise ImportError(
"Please install unishox2 for the text compression feature: "
"pip install unishox2-py3"
)
return unishox2.decompress(compressed, self.max_input_length)
else:
return compressed.decode()
# Path: fairseq/data/text_compressor.py
class TextCompressionLevel(Enum):
none = 0
low = 1
high = 2
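# A minimal round-trip sketch, not part of the repository: the 'low' level only relies on
# the built-in zlib module, so it runs without the optional unishox2 dependency.
from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel  # assumes fairseq is installed

tc = TextCompressor(level=TextCompressionLevel.low)
blob = tc.compress("audio/clip_0001.wav")
assert isinstance(blob, bytes)
assert tc.decompress(blob) == "audio/clip_0001.wav"
# TextCompressionLevel.none would store raw UTF-8 bytes; 'high' additionally needs unishox2-py3.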
# Path: fairseq/data/audio/raw_audio_dataset.py
import logging
import os
import sys
import io
import numpy as np
import torch
import torch.nn.functional as F
import pyarrow
import soundfile as sf
import soundfile as sf
from .. import FairseqDataset
from ..data_utils import compute_mask_indices, get_buckets, get_bucketed_sizes
from fairseq.data.audio.audio_utils import (
parse_path,
read_from_stored_zip,
is_sf_audio_data,
)
from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel
from torch.nn.utils.rnn import pad_sequence
from fairseq.data import data_utils, Dictionary
compute_mask_indices=compute_mask_indices,
**mask_compute_kwargs,
)
self.text_compressor = TextCompressor(level=text_compression_level)
skipped = 0
self.fnames = []
sizes = []
self.skipped_indices = set()
with open(manifest_path, "r") as f:
self.root_dir = f.readline().strip()
for i, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_sample_size is not None and sz < min_sample_size:
skipped += 1
self.skipped_indices.add(i)
continue
self.fnames.append(self.text_compressor.compress(items[0]))
sizes.append(sz)
logger.info(f"loaded {len(self.fnames)}, skipped {skipped} samples")
self.sizes = np.array(sizes, dtype=np.int64)
try:
self.fnames = pyarrow.array(self.fnames)
except:
logger.debug(
"Could not create a pyarrow array. Please install pyarrow for better performance"
)
pass
self.set_bucket_info(num_buckets)
self.alpha_root = alpha_root
self.boundaries_root = boundaries_root
def __getitem__(self, index):
fn = self.fnames[index]
fn = fn if isinstance(self.fnames, list) else fn.as_py()
fn = self.text_compressor.decompress(fn)
path_or_fp = os.path.join(self.root_dir, fn)
_path, slice_ptr = parse_path(path_or_fp)
if len(slice_ptr) == 2:
byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
assert is_sf_audio_data(byte_data)
path_or_fp = io.BytesIO(byte_data)
wav, curr_sample_rate = sf.read(path_or_fp, dtype="float32")
feats = torch.from_numpy(wav).float()
feats = self.postprocess(feats, curr_sample_rate)
output = {"id": index, "source": feats}
# load alpha
if self.alpha_root != "":
alpha = torch.load(os.path.join(self.alpha_root, fn.split('.')[0] + '.pt'))
output.update({"alpha": alpha})
if self.boundaries_root != "":
boundaries = torch.load(os.path.join(self.boundaries_root, fn.split('.')[0] + '.pt'))
output.update({"boundaries": boundaries})
return output
def collater(self, samples):
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
sources = [s["source"] for s in samples]
sizes = [len(s) for s in sources]
if self.pad:
target_size = min(max(sizes), self.max_sample_size)
else:
target_size = min(min(sizes), self.max_sample_size)
collated_sources = sources[0].new_zeros(len(sources), target_size)
padding_mask = (
torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None
)
for i, (source, size) in enumerate(zip(sources, sizes)):
diff = size - target_size
if diff == 0:
collated_sources[i] = source
elif diff < 0:
assert self.pad
collated_sources[i] = torch.cat(
[source, source.new_full((-diff,), 0.0)]
)
padding_mask[i, diff:] = True
else:
collated_sources[i] = self.crop_to_max_size(source, target_size)
input = {"source": collated_sources}
out = {"id": torch.LongTensor([s["id"] for s in samples])}
if self.pad:
input["padding_mask"] = padding_mask
if hasattr(self, "num_buckets") and self.num_buckets > 0:
assert self.pad, "Cannot bucket without padding first."
bucket = max(self._bucketed_sizes[s["id"]] for s in samples)
num_pad = bucket - collated_sources.size(-1)
if num_pad:
input["source"] = self._bucket_tensor(collated_sources, num_pad, 0)
input["padding_mask"] = self._bucket_tensor(padding_mask, num_pad, True)
if self.compute_mask_indices:
B = input["source"].size(0)
T = self._get_mask_indices_dims(input["source"].size(-1))
padding_mask_reshaped = input["padding_mask"].clone()
extra = padding_mask_reshaped.size(1) % T
if extra > 0:
padding_mask_reshaped = padding_mask_reshaped[:, :-extra]
| padding_mask_reshaped = padding_mask_reshaped.view( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zihuixue/AlignEgoExo
# Path: utils/config.py
# Path: utils/load_model.py
def load_ckpt(backbone, ckpt_name):
print(f'Loading pre-trained model: {ckpt_name}')
ckpt = torch.load(
ckpt_name,
map_location=lambda storage, loc: storage,
)
def remove_first_module(key):
return ".".join(key.split(".")[1:])
key = "state_dict" if "state_dict" in ckpt.keys() else "model_state"
state_dict = {
remove_first_module(k): v
for k, v in ckpt[key].items()
}
missing_keys, unexpected_keys = backbone.load_state_dict(
state_dict, strict=False
)
print('missing', missing_keys)
print('unexpected', unexpected_keys)
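# Illustrative only, not from the repository: load_ckpt strips the first module prefix from
# every checkpoint key before loading, e.g. "model.base_model.conv1.weight" becomes
# "base_model.conv1.weight". The standalone helper below mirrors that remapping on a plain dict.
def _strip_first_module(key: str) -> str:
    return ".".join(key.split(".")[1:])

state_dict = {"model.base_model.conv1.weight": 0, "model.embedding_layer.bias": 1}
remapped = {_strip_first_module(k): v for k, v in state_dict.items()}
assert set(remapped) == {"base_model.conv1.weight", "embedding_layer.bias"}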
# Path: models/embedder.py
class Embedder(nn.Module):
def __init__(self, args):
super(Embedder, self).__init__()
self.args = args
self.num_context_steps = args.num_context_steps
if args.base_model_name == 'resnet50':
self.base_model = resnet50(pretrained=True)
del self.base_model.layer4
del self.base_model.fc
elif args.base_model_name == 'resnet18':
self.base_model = resnet18(pretrained=False)
del self.base_model.layer4
del self.base_model.fc
elif args.base_model_name == 'vgg11':
self.base_model = BaseVGG11(pretrained=False)
else:
raise NotImplementedError
if args.freeze_base:
self.freeze_base_model()
c = 1024 if 'resnet' in args.base_model_name else 512
self.conv_layers = nn.Sequential(
nn.Conv3d(c, 256, kernel_size=3, padding=1),
nn.BatchNorm3d(256),
nn.ReLU(),
nn.Conv3d(256, 256, kernel_size=3, padding=1),
nn.BatchNorm3d(256),
nn.ReLU(),
)
self.fc_layers = nn.Sequential(
nn.Dropout(0.1),
nn.Linear(256, 256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(256, 256),
nn.ReLU(),
)
if args.input_size == 224:
self.ksize = 14
elif args.input_size == 168:
self.ksize = 11 if 'resnet' in args.base_model_name else 5
else:
raise NotImplementedError
self.maxpool = nn.MaxPool3d(
(self.num_context_steps, self.ksize, self.ksize)
)
self.embedding_layer = nn.Linear(256, args.embedding_size)
self.dropout = nn.Dropout(0.1)
def freeze_base_model(self):
for param in self.base_model.parameters():
param.requires_grad = False
def forward(self, x, bbox=None):
# x: (bs, ts=32, 3, 224/168, 224/168)
bs, ts, c, h, w = x.size()
x = x.reshape(-1, c, h, w)
x = self.base_model(x) # (bs, 3, 168, 168) -> (bs, 1024, 11, 11)
_, c, h, w = x.size()
x = x.contiguous().view(-1, c, self.num_context_steps, h, w)
x = self.dropout(x)
x = self.conv_layers(x)
x = self.maxpool(x)
_, c, _, _, _ = x.size()
x = x.reshape(bs, -1, c)
x = self.fc_layers(x)
x = self.embedding_layer(x.contiguous())
return x
# Path: models/embedder.py
class RoIPosEmbedder(Embedder):
def __init__(self, args):
super(RoIPosEmbedder, self).__init__(args)
self.roi_output_size = 4
self.n_boxes = 4 # 2 hand + 2 object
self.n_tokens = self.n_boxes + 1 # 4 local + 1 global
self.dim = args.hidden_dim
self.n_layers = args.n_layers
self.n_heads = 4
self.dp_rate = 0.1
self.use_mask = args.use_mask
self.use_bbox_pe = args.use_bbox_pe
self.weigh_token_by_bbox = args.weigh_token_by_bbox
self.maxpool_context = nn.MaxPool3d(
(self.num_context_steps, 1, 1)
)
self.roi_align = torchvision.ops.RoIAlign(output_size=(self.roi_output_size, self.roi_output_size),
spatial_scale=0.25,
sampling_ratio=-1)
self.proj_local = nn.Linear(64 * self.roi_output_size * self.roi_output_size, self.dim)
self.proj_global = nn.Linear(256, self.dim)
self.scale_factors = torch.tensor([1 / self.args.input_size] * 4 + [1], dtype=torch.float32)
self.pos_embed = nn.Parameter(torch.randn(5, self.dim), requires_grad=True)
self.token_embed = nn.Parameter(torch.randn(1, self.n_tokens, self.dim), requires_grad=True)
self.transformer_encoder = nn.TransformerEncoder(
encoder_layer=nn.TransformerEncoderLayer(d_model=self.dim, nhead=self.n_heads, dropout=self.dp_rate,
batch_first=True),
num_layers=self.n_layers
)
self.ln = nn.LayerNorm(self.dim)
self.embedding_layer = nn.Sequential(
nn.LayerNorm(self.dim),
nn.Linear(self.dim, args.embedding_size)
)
def pool_context_frames(self, x):
_, c, h, w = x.size()
x = x.contiguous().view(-1, c, self.num_context_steps, h, w)
x = self.dropout(x)
return self.maxpool_context(x).squeeze()
def forward(self, x, bbox):
bs, ts, c, h, w = x.size()
x = x.reshape(-1, c, h, w)
bbox = bbox.reshape(-1, bbox.shape[2], bbox.shape[3])
bbox_list = torch.chunk(bbox[:, :, 0:4], chunks=bbox.shape[0], dim=0)
bbox_list = [b.squeeze() for b in bbox_list]
bbox_id = bbox[:, :, -1].long()
x_mid, x = self.base_model(x, middle=True) # (bs, 3, 168, 168) -> (bs, 256, 42, 42) -> (bs, 1024, 11, 11)
x_mid = self.pool_context_frames(x_mid) # (bs*ts, 64, 56, 56)
x_roi = self.roi_align(x_mid, bbox_list) # (bs*ts*4, 64, 4, 4)
x_roi = x_roi.reshape(-1, self.n_boxes, *(x_roi.shape[1:])) # (bs, ts, 64, 4, 4)
x_local = torch.flatten(x_roi, start_dim=2) # (bs, ts, 1024)
x_local = self.proj_local(x_local)
if self.use_bbox_pe:
bbox_pe = bbox[:, :, 0:5] * self.scale_factors.to(bbox.device)
local_pe = torch.matmul(bbox_pe, self.pos_embed)
x_local = x_local + local_pe
x_local = x_local + self.token_embed[0][bbox_id]
if self.weigh_token_by_bbox:
bbox_prob = bbox[:, :, 4].unsqueeze(-1)
x_local = x_local * bbox_prob
_, c, h, w = x.size()
x = x.contiguous().view(-1, c, self.num_context_steps, h, w)
x = self.dropout(x)
x = self.conv_layers(x)
x = self.maxpool(x)
x_global = self.proj_global(x.squeeze().unsqueeze(1))
x_global = x_global + self.token_embed[:, -1, :]
x = torch.cat((x_local, x_global), dim=1) # (bs*ts/2, n_tokens, dim)
x = self.ln(x)
if self.use_mask:
mask_local = bbox_id.eq(-1)
mask_global = torch.full((mask_local.size(0), 1), False, dtype=torch.bool, device=mask_local.device) # do not mask
mask = torch.cat([mask_local, mask_global], dim=1)
output = self.transformer_encoder(x, src_key_padding_mask=mask)
else:
output = self.transformer_encoder(x)
y = output.mean(dim=1)
y = self.embedding_layer(y)
return y.reshape(bs, -1, y.shape[-1])
# Path: dataset/video_align_dataset.py
class VideoAlignmentDownstreamDataset(VideoAlignmentDataset):
def __init__(self, args, mode):
args.merge_all = True
super(VideoAlignmentDownstreamDataset, self).__init__(args, mode)
self.video_paths1 = sorted(self.video_paths1)
self._load_label()
self._construct_frame_path()
def _construct_frame_path(self):
self.frame_path_list = []
self.video_len_list = []
self.video_ego_id = []
for video in self.video_paths1:
video_frames_count = get_num_frames(video)
self.video_len_list.append(video_frames_count)
video_name = video.replace('.mp4', '').split('/')[-1]
view = video.split('/')[-2]
labels = self.label_dict[video_name]
assert video_frames_count == len(labels)
for frame_id in range(video_frames_count):
self.frame_path_list.append([video, frame_id, labels[frame_id]])
if view == 'ego':
self.video_ego_id.append(1)
else:
self.video_ego_id.append(0)
print(f'Finish constructing frames path list, total len {len(self.frame_path_list)}')
def _load_label(self):
file_path = os.path.join(self.data_path, 'label.pickle')
with open(file_path, 'rb') as handle:
self.label_dict = pickle.load(handle)
def __len__(self):
return len(self.frame_path_list)
def __getitem__(self, idx):
video_path, frame_id, frame_label = self.frame_path_list[idx]
h5_file_name = _extract_frames_h5py(video_path, self.frame_save_path)
context_frame_id = max(0, frame_id - self.frame_stride)
frame = self.get_frames_h5py(h5_file_name, [context_frame_id, frame_id])
frame = np.array(frame).astype(np.float32) # (2, 168, 168, 3)
return frame, frame_label, video_path
# Path: dataset/video_align_dataset_bbox.py
class VideoAlignmentBboxDownstreamDataset(VideoAlignmentDownstreamDataset):
def __init__(self, args, mode):
super(VideoAlignmentBboxDownstreamDataset, self).__init__(args, mode)
self.bbox_threshold = args.bbox_threshold
self._load_bounding_box()
if args.dataset != 'tennis_forehand':
sx = self.args.input_size / self.video_res[0]
sy = self.args.input_size / self.video_res[1]
self.scale_factor = np.array([sx, sy, sx, sy])
else:
sx_ego, sy_ego = self.args.input_size / self.video_res_ego[0], self.args.input_size / \
self.video_res_ego[1]
sx_exo, sy_exo = self.args.input_size / self.video_res_exo[0], self.args.input_size / \
self.video_res_exo[1]
self.scale_factor_ego = np.array([sx_ego, sy_ego, sx_ego, sy_ego])
self.scale_factor_exo = np.array([sx_exo, sy_exo, sx_exo, sy_exo])
self.expansion_ratio = args.bbox_expansion_ratio
def _load_bounding_box(self):
with open(os.path.join(self.data_path, 'det_bounding_box.pickle'), 'rb') as handle:
self.bounding_box_dict = pickle.load(handle)
if self.bbox_threshold > 0.0:
for key, value in self.bounding_box_dict.items():
mask = value[:, :, 4] < self.bbox_threshold
replacement = np.zeros_like(value)
replacement[..., -1] = -1
value[mask] = replacement[mask]
self.bounding_box_dict[key] = value
def __getitem__(self, idx):
video_path, frame_id, frame_label = self.frame_path_list[idx]
h5_file_name = _extract_frames_h5py(video_path, self.frame_save_path)
context_frame_id = max(0, frame_id - self.frame_stride)
frame = self.get_frames_h5py(h5_file_name, [context_frame_id, frame_id])
frame = np.array(frame).astype(np.float32) # (2, 168, 168, 3)
video_name = video_path.replace('ego/', 'ego_').replace('exo/', 'exo_').replace('.mp4', '').split('/')[-1]
bounding_box = self.bounding_box_dict[video_name].copy()
if self.dataset == 'tennis_forehand':
if 'exo' in video_name:
bounding_box[:, :, 0:4] = bounding_box[:, :, 0:4] * self.scale_factor_exo
else:
bounding_box[:, :, 0:4] = bounding_box[:, :, 0:4] * self.scale_factor_ego
else:
bounding_box[:, :, 0:4] = bounding_box[:, :, 0:4] * self.scale_factor
bounding_box = expand_bbox(bounding_box, self.expansion_ratio)
if self.args.one_object_bbox:
bounding_box[:, -1, -1] = -1 # set last object detection result to be null
bounding_box = bounding_box[frame_id]
return frame, frame_label, video_path, bounding_box.astype(np.float32)
# Path: evaluation/kendalls_tau.py
def kendalls_tau(save_path, video_len_list, video_paths, mode, detailed_view=False):
embs = np.load(f'{save_path}/{mode}_embeds.npy')
cur_idx = 0
ego_embs_list, exo_embs_list = [], []
embs_list = []
for i in range(len(video_len_list)):
video_len = video_len_list[i]
tmp = embs[cur_idx: cur_idx + video_len, :]
if 'ego' in video_paths[i]:
ego_embs_list.append(tmp)
else:
exo_embs_list.append(tmp)
embs_list.append(tmp)
cur_idx = cur_idx + video_len
if detailed_view:
print(len(ego_embs_list), len(exo_embs_list), len(embs_list))
print(f'Ego-Ego Kendall Tau {get_kendalls_tau_twolists(ego_embs_list, ego_embs_list, True):.4f}')
print(f'Exo-Exo Kendall Tau {get_kendalls_tau_twolists(exo_embs_list, exo_embs_list, True):.4f}')
print(f'Ego-Exo Kendall Tau {get_kendalls_tau_twolists(ego_embs_list, exo_embs_list, False):.4f}')
print(f'Exo-Ego Kendall Tau {get_kendalls_tau_twolists(exo_embs_list, ego_embs_list, False):.4f}')
tau = get_kendalls_tau(embs_list)
return tau
# Path: evaluation/frame_retrieval.py
def frame_retrieval(save_path, video_len_list, video_paths):
val_embs = np.load(f'{save_path}/val_embeds.npy')
val_labels = np.load(f'{save_path}/val_label.npy')
regular = retrieval_ap_at_k(video_len_list, video_paths, val_embs, val_labels, [10], cross_view=False)
ego2exo, exo2ego = retrieval_ap_at_k(video_len_list, video_paths, val_embs, val_labels, [10], cross_view=True)
return regular, ego2exo, exo2ego
# Path: evaluation/event_completion.py
def compute_progression_value(save_path, train_video_len_list, val_video_len_list, modify_embeddings=False):
train_embs, train_labels, val_embs, val_labels = load_embeds_and_labels(save_path)
train_embs, train_labels = construct_embs_labels_list(train_embs, train_labels, train_video_len_list, modify_embeddings)
val_embs, val_labels = construct_embs_labels_list(val_embs, val_labels, val_video_len_list, modify_embeddings)
lin_model = VectorRegression(sklearn.linear_model.LinearRegression())
lin_model.fit(train_embs, train_labels)
train_score = lin_model.score(train_embs, train_labels)
val_score = lin_model.score(val_embs, val_labels)
return train_score, val_score
# Path: evaluation/classification.py
def classification(save_path, train_video_ego_id, val_video_ego_id):
train_embs, train_labels, val_embs, val_labels = load_embeds_and_labels(save_path)
regular_f1 = fit_svm_model(train_embs, train_labels, val_embs, val_labels, cal_f1_score=True)
train_ego_idx = np.array(train_video_ego_id) == 1
train_exo_idx = np.array(train_video_ego_id) == 0
val_ego_idx = np.array(val_video_ego_id) == 1
val_exo_idx = np.array(val_video_ego_id) == 0
print(f'train: ego frames {np.sum(train_ego_idx)}, exo frames {np.sum(train_exo_idx)} | '
f'val: ego frames {np.sum(val_ego_idx)}, exo frames {np.sum(val_exo_idx)}')
ego2exo_val_f1 = fit_svm_model(train_embs[train_ego_idx], train_labels[train_ego_idx],
val_embs[val_exo_idx], val_labels[val_exo_idx], cal_f1_score=True)
exo2ego_val_f1 = fit_svm_model(train_embs[train_exo_idx], train_labels[train_exo_idx],
val_embs[val_ego_idx], val_labels[val_ego_idx], cal_f1_score=True)
return regular_f1, ego2exo_val_f1, exo2ego_val_f1
# Path: evaluation/evaluate_features.py
import os
import numpy as np
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader
from utils.config import argparser
from utils.load_model import load_ckpt
from models.embedder import Embedder, RoIPosEmbedder
from dataset.video_align_dataset import VideoAlignmentDownstreamDataset
from dataset.video_align_dataset_bbox import VideoAlignmentBboxDownstreamDataset
from evaluation.kendalls_tau import kendalls_tau
from evaluation.frame_retrieval import frame_retrieval
from evaluation.event_completion import compute_progression_value
from evaluation.classification import classification
def prepare_data_loader(args, mode, batch_size=1024, num_workers=0, bbox=False):
if bbox:
dataset = VideoAlignmentBboxDownstreamDataset(args, mode)
else:
dataset = VideoAlignmentDownstreamDataset(args, mode)
data_loader = DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=False,
drop_last=False,
)
print(f'Data loader len {len(data_loader)}')
return data_loader, dataset
def extract_embedding(mode, data_loader, model, save_path, device, object_box=False):
embeds_list = []
labels_list = []
for batch in tqdm(data_loader):
if object_box:
frame, frame_label, video_path, bbox = batch
else:
frame, frame_label, video_path = batch
frame = frame.reshape(1, -1, *frame.shape[-3:]) # frame-(1, 64, 168, 168, 3)
frame = frame.permute(0, 1, 4, 2, 3).float().to(device) # (1, 64, 3, 168, 168)
with torch.no_grad():
if object_box:
bbox = bbox.unsqueeze(0).to(device)
embeds = model(frame, bbox)
else:
embeds = model(frame)
embeds = embeds.squeeze().cpu().numpy()
embeds_list.append(embeds)
labels_list.append(frame_label.numpy())
embeds = np.concatenate(embeds_list, axis=0)
np.save(f'{save_path}/{mode}_embeds.npy', embeds)
print(f'Saved {mode} embeds to {save_path}/{mode}_embeds.npy')
labels = np.concatenate(labels_list, axis=0)
np.save(f'{save_path}/{mode}_label.npy', labels)
print(f'Saved {mode} labels to {save_path}/{mode}_label.npy')
def main():
device = torch.device("cuda:0")
args = argparser.parse_args()
assert args.eval_mode in ['val', 'test']
# prepare data loader
object_bbox = True if 'bbox' in args.task else False
loader_train, dataset_train = prepare_data_loader(args, 'train', batch_size=128, num_workers=args.num_workers, bbox=object_bbox)
loader_val, dataset_val = prepare_data_loader(args, args.eval_mode, batch_size=128, num_workers=args.num_workers, bbox=object_bbox)
| assert args.ckpt != '' |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JunMa11/UHNSeg-Quiz
# Path: nnunetv2/configuration.py
ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low
# Path: nnunetv2/evaluation/accumulate_cv_results.py
def accumulate_cv_results(trained_model_folder,
merged_output_folder: str,
folds: Union[List[int], Tuple[int, ...]],
num_processes: int = default_num_processes,
overwrite: bool = True):
"""
There are a lot of things that can get fucked up, so the simplest way to deal with potential problems is to
collect the cv results into a separate folder and then evaluate them again. No messing with summary_json files!
"""
if overwrite and isdir(merged_output_folder):
shutil.rmtree(merged_output_folder)
maybe_mkdir_p(merged_output_folder)
dataset_json = load_json(join(trained_model_folder, 'dataset.json'))
plans_manager = PlansManager(join(trained_model_folder, 'plans.json'))
rw = plans_manager.image_reader_writer_class()
shutil.copy(join(trained_model_folder, 'dataset.json'), join(merged_output_folder, 'dataset.json'))
shutil.copy(join(trained_model_folder, 'plans.json'), join(merged_output_folder, 'plans.json'))
did_we_copy_something = False
for f in folds:
expected_validation_folder = join(trained_model_folder, f'fold_{f}', 'validation')
if not isdir(expected_validation_folder):
raise RuntimeError(f"fold {f} of model {trained_model_folder} is missing. Please train it!")
predicted_files = subfiles(expected_validation_folder, suffix=dataset_json['file_ending'], join=False)
for pf in predicted_files:
if overwrite and isfile(join(merged_output_folder, pf)):
raise RuntimeError(f'More than one of your folds has a prediction for case {pf}')
if overwrite or not isfile(join(merged_output_folder, pf)):
shutil.copy(join(expected_validation_folder, pf), join(merged_output_folder, pf))
did_we_copy_something = True
if did_we_copy_something or not isfile(join(merged_output_folder, 'summary.json')):
label_manager = plans_manager.get_label_manager(dataset_json)
gt_folder = join(nnUNet_raw, plans_manager.dataset_name, 'labelsTr')
if not isdir(gt_folder):
gt_folder = join(nnUNet_preprocessed, plans_manager.dataset_name, 'gt_segmentations')
compute_metrics_on_folder(gt_folder,
merged_output_folder,
join(merged_output_folder, 'summary.json'),
rw,
dataset_json['file_ending'],
label_manager.foreground_regions if label_manager.has_regions else
label_manager.foreground_labels,
label_manager.ignore_label,
num_processes)
# Path: nnunetv2/evaluation/evaluate_predictions.py
def region_or_label_to_mask(segmentation: np.ndarray, region_or_label: Union[int, Tuple[int, ...]]) -> np.ndarray:
if np.isscalar(region_or_label):
return segmentation == region_or_label
else:
mask = np.zeros_like(segmentation, dtype=bool)
for r in region_or_label:
mask[segmentation == r] = True
return mask
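# A small illustration, not part of the nnU-Net sources: a region given as a tuple of labels
# yields the union mask, while a single label yields a plain equality mask.
import numpy as np
from nnunetv2.evaluation.evaluate_predictions import region_or_label_to_mask  # assumes nnunetv2 is installed

seg = np.array([[0, 1], [2, 1]])
assert np.array_equal(region_or_label_to_mask(seg, 1), [[False, True], [False, True]])
assert np.array_equal(region_or_label_to_mask(seg, (1, 2)), [[False, True], [True, True]])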
# Path: nnunetv2/evaluation/evaluate_predictions.py
def compute_metrics_on_folder(folder_ref: str, folder_pred: str, output_file: str,
image_reader_writer: BaseReaderWriter,
file_ending: str,
regions_or_labels: Union[List[int], List[Union[int, Tuple[int, ...]]]],
ignore_label: int = None,
num_processes: int = default_num_processes,
chill: bool = True) -> dict:
"""
output_file must end with .json; can be None
"""
if output_file is not None:
assert output_file.endswith('.json'), 'output_file should end with .json'
files_pred = subfiles(folder_pred, suffix=file_ending, join=False)
files_ref = subfiles(folder_ref, suffix=file_ending, join=False)
if not chill:
present = [isfile(join(folder_pred, i)) for i in files_ref]
assert all(present), "Not all files in folder_pred exist in folder_ref"
files_ref = [join(folder_ref, i) for i in files_pred]
files_pred = [join(folder_pred, i) for i in files_pred]
with multiprocessing.get_context("spawn").Pool(num_processes) as pool:
# for i in list(zip(files_ref, files_pred, [image_reader_writer] * len(files_pred), [regions_or_labels] * len(files_pred), [ignore_label] * len(files_pred))):
# compute_metrics(*i)
results = pool.starmap(
compute_metrics,
list(zip(files_ref, files_pred, [image_reader_writer] * len(files_pred), [regions_or_labels] * len(files_pred),
[ignore_label] * len(files_pred)))
)
# mean metric per class
metric_list = list(results[0]['metrics'][regions_or_labels[0]].keys())
means = {}
for r in regions_or_labels:
means[r] = {}
for m in metric_list:
means[r][m] = np.nanmean([i['metrics'][r][m] for i in results])
# foreground mean
foreground_mean = {}
for m in metric_list:
values = []
for k in means.keys():
if k == 0 or k == '0':
continue
values.append(means[k][m])
foreground_mean[m] = np.mean(values)
[recursive_fix_for_json_export(i) for i in results]
recursive_fix_for_json_export(means)
recursive_fix_for_json_export(foreground_mean)
result = {'metric_per_case': results, 'mean': means, 'foreground_mean': foreground_mean}
if output_file is not None:
save_summary_json(result, output_file)
return result
# print('DONE')
# Path: nnunetv2/evaluation/evaluate_predictions.py
def load_summary_json(filename: str):
results = load_json(filename)
# convert keys in mean metrics
results['mean'] = {key_to_label_or_region(k): results['mean'][k] for k in results['mean'].keys()}
# convert metric_per_case
for i in range(len(results["metric_per_case"])):
results["metric_per_case"][i]['metrics'] = \
{key_to_label_or_region(k): results["metric_per_case"][i]['metrics'][k]
for k in results["metric_per_case"][i]['metrics'].keys()}
return results
# Path: nnunetv2/evaluation/evaluate_predictions.py
def label_or_region_to_key(label_or_region: Union[int, Tuple[int]]):
return str(label_or_region)
# Path: nnunetv2/imageio/base_reader_writer.py
class BaseReaderWriter(ABC):
@staticmethod
def _check_all_same(input_list):
# compare all entries to the first
for i in input_list[1:]:
if i != input_list[0]:
return False
return True
@staticmethod
def _check_all_same_array(input_list):
# compare all entries to the first
for i in input_list[1:]:
if i.shape != input_list[0].shape or not np.allclose(i, input_list[0]):
return False
return True
@abstractmethod
def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[np.ndarray, dict]:
"""
Reads a sequence of images and returns a 4d (!) np.ndarray along with a dictionary. The 4d array must have the
modalities (or color channels, or however you would like to call them) in its first axis, followed by the
spatial dimensions (so shape must be c,x,y,z where c is the number of modalities (can be 1)).
Use the dictionary to store necessary meta information that is lost when converting to numpy arrays, for
example the Spacing, Orientation and Direction of the image. This dictionary will be handed over to write_seg
for exporting the predicted segmentations, so make sure you have everything you need in there!
IMPORTANT: dict MUST have a 'spacing' key with a tuple/list of length 3 with the voxel spacing of the np.ndarray.
Example: my_dict = {'spacing': (3, 0.5, 0.5), ...}. This is needed for planning and
preprocessing. The ordering of the numbers must correspond to the axis ordering in the returned numpy array. So
if the array has shape c,x,y,z and the spacing is (a,b,c) then a must be the spacing of x, b the spacing of y
and c the spacing of z.
In the case of 2D images, the returned array should have shape (c, 1, x, y) and the spacing should be
(999, sp_x, sp_y). Make sure 999 is larger than sp_x and sp_y! Example: shape=(3, 1, 224, 224),
spacing=(999, 1, 1)
For images that don't have a spacing, set the spacing to 1 (2d exception with 999 for the first axis still applies!)
:param image_fnames:
:return:
1) a np.ndarray of shape (c, x, y, z) where c is the number of image channels (can be 1) and x, y, z are
the spatial dimensions (set x=1 for 2D! Example: (3, 1, 224, 224) for RGB image).
2) a dictionary with metadata. This can be anything. BUT it HAS to include a {'spacing': (a, b, c)} where a
is the spacing of x, b of y and c of z! If an image doesn't have spacing, just set this to 1. For 2D, set
a=999 (largest spacing value! Make it larger than b and c)
"""
pass
@abstractmethod
def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
"""
Same requirements as BaseReaderWriter.read_image. Returned segmentations must have shape 1,x,y,z. Multiple
segmentations are not (yet?) allowed
If images and segmentations can be read the same way you can just `return self.read_image((image_fname,))`
:param seg_fname:
:return:
1) a np.ndarray of shape (1, x, y, z) where x, y, z are
the spatial dimensions (set x=1 for 2D! Example: (1, 1, 224, 224) for 2D segmentation).
2) a dictionary with metadata. This can be anything. BUT it HAS to include a {'spacing': (a, b, c)} where a
is the spacing of x, b of y and c of z! If an image doesn't have spacing, just set this to 1. For 2D, set
a=999 (largest spacing value! Make it larger than b and c)
"""
pass
@abstractmethod
def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None:
"""
Export the predicted segmentation to the desired file format. The given seg array will have the same shape and
orientation as the corresponding image data, so you don't need to do any resampling or whatever. Just save :-)
properties is the same dictionary you created during read_images/read_seg so you can use the information here
to restore metadata
IMPORTANT: Segmentations are always 3D! If your input images were 2d then the segmentation will have shape
1,x,y. You need to catch that and export accordingly (for 2d images you need to convert the 3d segmentation
to 2d via seg = seg[0])!
:param seg: A segmentation (np.ndarray, integer) of shape (x, y, z). For 2D segmentations this will be (1, y, z)!
:param output_fname:
:param properties: the dictionary that you created in read_images (the ones this segmentation is based on).
Use this to restore metadata
:return:
"""
pass
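# A bare-bones sketch of the contract documented above, not an nnU-Net reader: images come
# back as a (c, x, y, z) float array plus a dict that must contain 'spacing', and write_seg
# persists the (x, y, z) segmentation. It assumes plain .npy volumes with a fixed, known
# spacing; a real reader should derive spacing from the image metadata instead.
from typing import List, Tuple, Union
import numpy as np
from nnunetv2.imageio.base_reader_writer import BaseReaderWriter  # assumes nnunetv2 is installed

class NpyReaderWriter(BaseReaderWriter):
    FIXED_SPACING = (1.0, 1.0, 1.0)  # assumption: .npy files carry no spacing information

    def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[np.ndarray, dict]:
        # each file is assumed to hold one 3d volume; stacking yields (c, x, y, z)
        stacked = np.stack([np.load(f) for f in image_fnames]).astype(np.float32)
        return stacked, {'spacing': self.FIXED_SPACING}

    def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
        # segmentations follow the same convention with a single channel: (1, x, y, z)
        return self.read_images((seg_fname,))

    def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None:
        np.save(output_fname, seg)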
# Path: nnunetv2/paths.py
# Path: nnunetv2/utilities/file_path_utilities.py
def folds_tuple_to_string(folds: Union[List[int], Tuple[int, ...]]):
s = str(folds[0])
for f in folds[1:]:
s += f"_{f}"
return s
# Path: nnunetv2/utilities/json_export.py
def recursive_fix_for_json_export(my_dict: dict):
# json is stupid. 'cannot serialize object of type bool_/int64/float64'. Come on bro.
keys = list(my_dict.keys()) # cannot iterate over keys() if we change keys....
for k in keys:
if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)):
tmp = my_dict[k]
del my_dict[k]
my_dict[int(k)] = tmp
del tmp
k = int(k)
if isinstance(my_dict[k], dict):
recursive_fix_for_json_export(my_dict[k])
elif isinstance(my_dict[k], np.ndarray):
assert my_dict[k].ndim == 1, 'only 1d arrays are supported'
my_dict[k] = fix_types_iterable(my_dict[k], output_type=list)
elif isinstance(my_dict[k], (np.bool_,)):
my_dict[k] = bool(my_dict[k])
elif isinstance(my_dict[k], (np.int64, np.int32, np.int8, np.uint8)):
my_dict[k] = int(my_dict[k])
elif isinstance(my_dict[k], (np.float32, np.float64, np.float16)):
my_dict[k] = float(my_dict[k])
elif isinstance(my_dict[k], list):
my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k]))
elif isinstance(my_dict[k], tuple):
my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple)
elif isinstance(my_dict[k], torch.device):
my_dict[k] = str(my_dict[k])
else:
pass # pray it can be serialized
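# NOTE (illustrative usage, not from the repository): json.dumps rejects numpy scalar types,
# which is exactly what the helper above converts in place. The dictionary contents below are
# invented for the example.
import json
import numpy as np
from nnunetv2.utilities.json_export import recursive_fix_for_json_export

metrics = {np.int64(1): {'dice': np.float32(0.87), 'present': np.bool_(True)}}
recursive_fix_for_json_export(metrics)  # keys and values become plain int/float/bool
print(json.dumps(metrics))              # now serializable without a TypeError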
# Path: nnunetv2/utilities/plans_handling/plans_handler.py
class PlansManager(object):
def __init__(self, plans_file_or_dict: Union[str, dict]):
"""
Why do we need this?
1) resolve inheritance in configurations
2) expose otherwise annoying stuff like getting the label manager or IO class from a string
3) clearly expose the things that are in the plans instead of hiding them in a dict
4) cache shit
This class does not prevent you from going wild. You can still use the plans directly if you prefer
        (PlansManager.plans['key'])
"""
self.plans = plans_file_or_dict if isinstance(plans_file_or_dict, dict) else load_json(plans_file_or_dict)
def __repr__(self):
return self.plans.__repr__()
def _internal_resolve_configuration_inheritance(self, configuration_name: str,
visited: Tuple[str, ...] = None) -> dict:
if configuration_name not in self.plans['configurations'].keys():
raise ValueError(f'The configuration {configuration_name} does not exist in the plans I have. Valid '
f'configuration names are {list(self.plans["configurations"].keys())}.')
configuration = deepcopy(self.plans['configurations'][configuration_name])
if 'inherits_from' in configuration:
parent_config_name = configuration['inherits_from']
if visited is None:
visited = (configuration_name,)
else:
if parent_config_name in visited:
raise RuntimeError(f"Circular dependency detected. The following configurations were visited "
f"while solving inheritance (in that order!): {visited}. "
f"Current configuration: {configuration_name}. Its parent configuration "
f"is {parent_config_name}.")
visited = (*visited, configuration_name)
base_config = self._internal_resolve_configuration_inheritance(parent_config_name, visited)
base_config.update(configuration)
configuration = base_config
return configuration
@lru_cache(maxsize=10)
def get_configuration(self, configuration_name: str):
if configuration_name not in self.plans['configurations'].keys():
raise RuntimeError(f"Requested configuration {configuration_name} not found in plans. "
f"Available configurations: {list(self.plans['configurations'].keys())}")
configuration_dict = self._internal_resolve_configuration_inheritance(configuration_name)
return ConfigurationManager(configuration_dict)
@property
def dataset_name(self) -> str:
return self.plans['dataset_name']
@property
def plans_name(self) -> str:
return self.plans['plans_name']
@property
def original_median_spacing_after_transp(self) -> List[float]:
return self.plans['original_median_spacing_after_transp']
@property
def original_median_shape_after_transp(self) -> List[float]:
return self.plans['original_median_shape_after_transp']
@property
@lru_cache(maxsize=1)
def image_reader_writer_class(self) -> Type[BaseReaderWriter]:
return recursive_find_reader_writer_by_name(self.plans['image_reader_writer'])
@property
def transpose_forward(self) -> List[int]:
return self.plans['transpose_forward']
@property
def transpose_backward(self) -> List[int]:
return self.plans['transpose_backward']
@property
def available_configurations(self) -> List[str]:
return list(self.plans['configurations'].keys())
@property
@lru_cache(maxsize=1)
def experiment_planner_class(self) -> Type[ExperimentPlanner]:
planner_name = self.experiment_planner_name
experiment_planner = recursive_find_python_class(join(nnunetv2.__path__[0], "experiment_planning"),
planner_name,
current_module="nnunetv2.experiment_planning")
return experiment_planner
@property
def experiment_planner_name(self) -> str:
return self.plans['experiment_planner_used']
@property
@lru_cache(maxsize=1)
def label_manager_class(self) -> Type[LabelManager]:
return get_labelmanager_class_from_plans(self.plans)
def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelManager:
return self.label_manager_class(label_dict=dataset_json['labels'],
regions_class_order=dataset_json.get('regions_class_order'),
**kwargs)
@property
def foreground_intensity_properties_per_channel(self) -> dict:
if 'foreground_intensity_properties_per_channel' not in self.plans.keys():
if 'foreground_intensity_properties_by_modality' in self.plans.keys():
return self.plans['foreground_intensity_properties_by_modality']
return self.plans['foreground_intensity_properties_per_channel']
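# NOTE (illustrative usage, not from the repository): a minimal plans dictionary showing how
# PlansManager resolves 'inherits_from' between configurations (point 1 of its docstring).
# The configuration names and keys are invented; real plans files contain far more entries,
# and get_configuration() would normally wrap the result in a ConfigurationManager.
from nnunetv2.utilities.plans_handling.plans_handler import PlansManager

plans = {
    'configurations': {
        '3d_fullres': {'batch_size': 2, 'patch_size': [128, 128, 128]},
        '3d_fullres_bs4': {'inherits_from': '3d_fullres', 'batch_size': 4},
    }
}
pm = PlansManager(plans)
resolved = pm._internal_resolve_configuration_inheritance('3d_fullres_bs4')
print(resolved)  # parent values are merged in, child overrides win: batch_size=4, patch_size inherited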
# Path: nnunetv2/postprocessing/remove_connected_components.py
import argparse
import multiprocessing
import shutil
import numpy as np
from multiprocessing import Pool
from typing import Union, Tuple, List, Callable
from acvl_utils.morphology.morphology_helper import remove_all_but_largest_component
from batchgenerators.utilities.file_and_folder_operations import load_json, subfiles, maybe_mkdir_p, join, isfile, \
isdir, save_pickle, load_pickle, save_json
from nnunetv2.configuration import default_num_processes
from nnunetv2.evaluation.accumulate_cv_results import accumulate_cv_results
from nnunetv2.evaluation.evaluate_predictions import region_or_label_to_mask, compute_metrics_on_folder, \
load_summary_json, label_or_region_to_key
from nnunetv2.imageio.base_reader_writer import BaseReaderWriter
from nnunetv2.paths import nnUNet_raw
from nnunetv2.utilities.file_path_utilities import folds_tuple_to_string
from nnunetv2.utilities.json_export import recursive_fix_for_json_export
from nnunetv2.utilities.plans_handling.plans_handler import PlansManager
def remove_all_but_largest_component_from_segmentation(segmentation: np.ndarray,
labels_or_regions: Union[int, Tuple[int, ...],
List[Union[int, Tuple[int, ...]]]],
background_label: int = 0) -> np.ndarray:
mask = np.zeros_like(segmentation, dtype=bool)
if not isinstance(labels_or_regions, list):
| labels_or_regions = [labels_or_regions] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Inflectra/spira-jira-migration-advanced
# Path: utility.py
def convert_jira_markup_to_html(jira_connection_dict, skip_ssl, jira_markup: str):
render_markup_url = jira_connection_dict["jira_base_url"] + "/rest/api/1.0/render"
if jira_markup is None or jira_markup == "":
return "--EMPTY--"
# Strip all the \x unicode chars.
jira_markup = re.sub(r"\\x([0-9a-fA-F]{2})", "", jira_markup)
# Try to dump a string to json. If it fails return a standard string and warning messages.
if not try_json_dump_string(jira_markup):
return "--MIGRATION OF TEXT FAILED because of error during JSON validation--"
headers = {
"Content-Type": "application/json",
}
body = {
"rendererType": "atlassian-wiki-renderer",
"unrenderedMarkup": jira_markup,
}
response = requests.request(
"POST",
render_markup_url,
headers=headers,
verify=(not skip_ssl),
data=json.dumps(body),
)
if response.status_code != 200:
print(response.text)
print("Conversion of text from jira markup to html failed for text:")
print(jira_markup)
print(repr(jira_markup))
return "--MIGRATION OF TEXT FAILED because of jira renderer error--"
else:
return response.text
# Path: convert_jira_to_spira_issues.py
def find_spira_user_id_by_email(users, person_field, issue):
if not issue["fields"][person_field]:
return 1
else:
user = next(
filter(
lambda x: x["EmailAddress"]
== issue["fields"][person_field]["emailAddress"],
users,
),
None,
)
if user:
return user["UserId"]
else:
return 1
# Path: convert_jira_to_spira_issues.py
def get_jira_data_from_custom_field(issue, jira_custom_fields, jira_field_name):
customfield = next(
filter(lambda x: x["name"] == jira_field_name, jira_custom_fields), None
)
if customfield and customfield["id"] in issue["fields"]:
custom_value = issue["fields"][customfield["id"]]
try:
if is_datetime(str(custom_value)):
custom_value = convert_datetime(custom_value)
except Exception as e:
print(e)
return custom_value
else:
return None
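# NOTE (illustrative data shapes, not from the repository): the structures that
# get_jira_data_from_custom_field expects -- Jira's custom field metadata maps a display name
# to a field id, and the issue stores the value under that id. All names, ids and values are
# invented for the example.
jira_custom_fields = [{"name": "Epic Link", "id": "customfield_10014"}]
issue = {"fields": {"customfield_10014": "PROJ-123"}}

customfield = next(filter(lambda x: x["name"] == "Epic Link", jira_custom_fields), None)
value = issue["fields"][customfield["id"]] if customfield else None
print(value)  # PROJ-123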
# Path: convert_jira_to_spira_issues.py
def add_custom_properties(
issue,
spira_metadata,
jira_metadata,
custom_props_mapping,
artifact_type,
jira_connection_dict, # Needed for the rich_text_conversion
skip_ssl, # Needed for the rich_text_conversion
) -> list:
custom_properties = [
# Special case for Jira id as it's not in the fields part of the issue
jira_string_field_to_spira_custom_prop(
spira_metadata, artifact_type, "Jira Id", issue["key"]
)
]
custom_prop_to_add = None
# Go through all the mappings of the custom props
for prop in custom_props_mapping:
# Check if it's a datetime type of the custom prop
if prop["type"] == "date_time" or prop["type"] == "date":
# Check if its a custom field, starting with if it's not
if prop["jira_key"] is not None:
time = issue["fields"][prop["jira_key"]]
custom_prop_to_add = jira_datetime_field_to_spira_custom_prop(
spira_metadata, artifact_type, prop["spira_name"], time
)
# Handle it if the value is in a custom field
else:
time = get_jira_data_from_custom_field(
issue, jira_metadata["customfields"], prop["jira_custom_field_name"]
)
custom_prop_to_add = jira_datetime_field_to_spira_custom_prop(
spira_metadata, artifact_type, prop["spira_name"], time
)
# Check if it's a text type custom prop
elif prop["type"] == "text":
if prop["jira_key"] is not None:
# Try to dump a string to json. If it fails set a standard string with a warning message.
if not try_json_dump_string(issue["fields"][prop["jira_key"]]):
issue["fields"][
prop["jira_key"]
] = "--MIGRATION OF TEXT FAILED because of error during JSON validation--"
custom_prop_to_add = jira_string_field_to_spira_custom_prop(
spira_metadata,
artifact_type,
prop["spira_name"],
issue["fields"][prop["jira_key"]],
)
else:
text = get_jira_data_from_custom_field(
issue, jira_metadata["customfields"], prop["jira_custom_field_name"]
)
custom_prop_to_add = jira_string_field_to_spira_custom_prop(
spira_metadata, artifact_type, prop["spira_name"], text
)
# Check if it's a decimal custom prop
elif prop["type"] == "decimal":
if prop["jira_key"] is not None:
custom_prop_to_add = jira_decimal_field_to_spira_custom_prop(
spira_metadata,
artifact_type,
prop["spira_name"],
issue["fields"][prop["jira_key"]],
)
else:
number = get_jira_data_from_custom_field(
issue, jira_metadata["customfields"], prop["jira_custom_field_name"]
)
custom_prop_to_add = jira_decimal_field_to_spira_custom_prop(
spira_metadata, artifact_type, prop["spira_name"], number
)
# Check if it's a rich text custom prop
elif prop["type"] == "rich_text":
if jira_connection_dict is None:
print("No jira connection dict present, can't convert rich text")
continue
# Check if it's a jira key,
if prop["jira_key"] is not None:
custom_prop_to_add = jira_textarea_field_to_spira_custom_prop(
spira_metadata,
artifact_type,
prop["spira_name"],
issue["fields"][prop["jira_key"]],
jira_connection_dict,
skip_ssl,
)
# Else it is a custom field
else:
text = get_jira_data_from_custom_field(
issue, jira_metadata["customfields"], prop["jira_custom_field_name"]
)
custom_prop_to_add = jira_textarea_field_to_spira_custom_prop(
spira_metadata,
artifact_type,
prop["spira_name"],
text, # type: ignore
jira_connection_dict,
skip_ssl,
)
# Check if it's a list custom prop
elif prop["type"] == "list":
if prop["jira_key"] is not None:
custom_prop_to_add = jira_list_field_to_spira_custom_prop(
spira_metadata,
artifact_type,
prop["spira_name"],
issue["fields"][prop["jira_key"]]["value"],
)
else:
list_value = get_jira_data_from_custom_field(
issue, jira_metadata["customfields"], prop["jira_custom_field_name"]
)
custom_prop_to_add = jira_list_field_to_spira_custom_prop(
spira_metadata, artifact_type, prop["spira_name"], list_value
)
# Check if it's a multiselect list custom prop
elif prop["type"] == "multiselect_list":
# Check if it's a jira key,
            # This is not tested since there are no standard fields as a multiselect list at the moment.
if prop["jira_key"] is not None:
custom_prop_to_add = jira_multiselect_list_field_to_spira_custom_prop(
spira_metadata,
artifact_type,
prop["spira_name"],
issue["fields"][prop["jira_key"]],
)
else:
list_of_values = get_jira_data_from_custom_field(
issue, jira_metadata["customfields"], prop["jira_custom_field_name"]
)
custom_prop_to_add = jira_multiselect_list_field_to_spira_custom_prop(
spira_metadata, artifact_type, prop["spira_name"], list_of_values
)
custom_properties.append(custom_prop_to_add)
return custom_properties
# Path: convert_jira_to_spira_issues.py
def get_mapped_spira_type_name(type_mapping, jira_issue_type) -> str | None:
spira_mapped_type_name = None
for spira_type in type_mapping:
jira_types = type_mapping[spira_type]
if isinstance(jira_types, list) and jira_issue_type in type_mapping[spira_type]:
spira_mapped_type_name = spira_type
break
elif (
isinstance(jira_types, str) and jira_issue_type == type_mapping[spira_type]
):
spira_mapped_type_name = spira_type
break
return spira_mapped_type_name
# Path: convert_jira_to_spira_issues.py
def jira_status_to_spira_status_id(mapping, status_types, issue_status_name) -> int:
mapped_name = mapping[issue_status_name]
status_object = next(filter(lambda x: x["Name"] == mapped_name, status_types), None)
if status_object and "StatusId" in status_object:
return int(status_object["StatusId"])
elif status_object and "RequirementStatusId" in status_object:
return int(status_object["RequirementStatusId"])
elif status_object and "IncidentStatusId" in status_object:
return int(status_object["IncidentStatusId"])
elif status_object and "TaskStatusId" in status_object:
return int(status_object["TaskStatusId"])
elif status_object and "CapabilityStatusId" in status_object:
return int(status_object["CapabilityStatusId"])
else:
return 0
# Path: convert_jira_to_spira_program.py
import json
from utility import convert_jira_markup_to_html
from convert_jira_to_spira_issues import (
find_spira_user_id_by_email,
get_jira_data_from_custom_field,
add_custom_properties,
get_mapped_spira_type_name,
jira_status_to_spira_status_id,
)
issue,
spira_metadata,
jira_metadata,
mapping_dict["custom_props"]["capabilities"],
"capability",
jira_connection_dict,
skip_ssl,
)
capability["payload"] = payload
capability["parent_link"] = get_jira_data_from_custom_field(
issue, jira_metadata["customfields"], "Parent Link"
)
capability["epic_link"] = get_jira_data_from_custom_field(
issue, jira_metadata["customfields"], "Epic Link"
)
output_dict["program"].append(capability)
json.dump(output_dict, conversion_output, indent=4)
conversion_output.close()
def convert_jira_versions_to_spira_program_milestones(
capabilities, jira_output_versions_dict, mapping_dict, spira_program_metadata
):
milestones_to_spira = open("temp/milestones_to_spira.json", "w")
input_versions = jira_output_versions_dict["versions"]
to_spira_dict = {"milestones": []}
versions = []
for capability in capabilities:
affectedVersions = capability["fields"]["versions"]
fixVersions = capability["fields"]["fixVersions"]
if len(affectedVersions) > 0:
print("For JIRA key: " + capability["key"])
print(
"This script does not handle affectedVersions, but they are still present:"
)
print(str(affectedVersions))
if len(fixVersions) > 1:
print("For JIRA key: " + capability["key"])
print(
"Spira can't handle more than one fixVersion, the first one will be set. These are the other versions not handled:"
)
print(str(fixVersions[1:]))
if len(fixVersions) > 0:
found_version = next(
filter(
lambda x: x["name"] == fixVersions[0]["name"],
input_versions,
),
None,
)
# This might not work as identical objects might not be detected as such.
if found_version not in versions and found_version is not None:
versions.append(found_version)
for version in versions:
milestone = {}
payload = {
# MilestoneId - READ ONLY - assigned on creation
# Guid - READ ONLY
# CreatorId - Jira does not record creator of versions
# CreatorName
# OwnerId - Jira does not record owner of versions
# OwnerName
"StatusId": calculate_milestone_status_id(
version,
mapping_dict["milestone_statuses"],
spira_program_metadata["statuses"]["milestone"],
),
# StatusIsOpen - READ ONLY
# StatusName
# TypeId - TODO
# TypeName
"Name": version["name"] if "name" in version else "",
"Description": version["description"] if "description" in version else " ",
# ProjectGroupId
# ProjectGroupName
"StartDate": (version["startDate"] + "T00:00:00.000")
if "startDate" in version
else "1970-01-01T00:00:00",
# ChildrenStartDate - probably read only
"EndDate": (version["releaseDate"] + "T00:00:00.000")
if "releaseDate" in version
else "1970-01-01T00:00:00",
# ChildrenEndDate - probably read only
# CreationDate - probably read only - need to use custom properties
            # LastUpdateDate - probably read only - need to use custom properties
# PercentComplete - READ ONLY
# ReleaseCount - READ ONLY
# RequirementCount - READ ONLY
# ConcurrencyGuid - READ ONLY
# CustomProperties - TODO
}
milestone["payload"] = payload
to_spira_dict["milestones"].append(milestone)
json.dump(to_spira_dict, milestones_to_spira, indent=4)
milestones_to_spira.close()
def get_milestone_id_from_jira_issue(issue, milestones):
affectedVersions = issue["fields"]["versions"]
fixVersions = issue["fields"]["fixVersions"]
if len(affectedVersions) > 0:
print("For JIRA key: " + issue["key"])
| print( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RWTH-EBC/vclibpy
# Path: vclibpy/datamodels.py
class Inputs(VariableContainer):
"""
Class defining inputs to calculate the FlowsheetState.
While the inputs are pre-defined, you may add further ones
using the `set` method.
Args:
n (float): Relative compressor speed between 0 and 1.
T_eva_in (float): Secondary side evaporator inlet temperature.
T_con_in (float): Secondary side condenser inlet temperature.
m_flow_eva (float): Secondary side evaporator mass flow rate.
m_flow_con (float): Secondary side condenser mass flow rate.
dT_eva_superheating (float): Super-heating after evaporator.
dT_con_subcooling (float): Subcooling after condenser.
T_ambient (float): Ambient temperature of the machine.
"""
def __init__(
self,
n: float = None,
T_eva_in: float = None,
T_con_in: float = None,
m_flow_eva: float = None,
m_flow_con: float = None,
dT_eva_superheating: float = None,
dT_con_subcooling: float = None,
T_ambient: float = None
):
"""
Initializes an Inputs object with parameters representing external conditions
for the vapor compression cycle.
Args:
n (float): Relative compressor speed between 0 and 1 (unit: -).
T_eva_in (float): Secondary side evaporator inlet temperature (unit: K).
T_con_in (float): Secondary side condenser inlet temperature (unit: K).
m_flow_eva (float): Secondary side evaporator mass flow rate (unit: kg/s).
m_flow_con (float): Secondary side condenser mass flow rate (unit: kg/s).
dT_eva_superheating (float): Super-heating after evaporator (unit: K).
dT_con_subcooling (float): Subcooling after condenser (unit: K).
T_ambient (float): Ambient temperature of the machine (unit: K).
"""
super().__init__()
self.set(
name="n",
value=n,
unit="-",
description="Relative compressor speed"
)
self.set(
name="T_eva_in",
value=T_eva_in,
unit="K",
description="Secondary side evaporator inlet temperature"
)
self.set(
name="T_con_in",
value=T_con_in,
unit="K",
description="Secondary side condenser inlet temperature"
)
self.set(
name="m_flow_con",
value=m_flow_con,
unit="kg/s",
description="Secondary side condenser mass flow rate"
)
self.set(
name="m_flow_eva",
value=m_flow_eva,
unit="kg/s",
description="Secondary side evaporator mass flow rate"
)
self.set(
name="dT_eva_superheating",
value=dT_eva_superheating,
unit="K",
description="Super-heating after evaporator"
)
self.set(
name="dT_con_subcooling",
value=dT_con_subcooling,
unit="K",
description="Subcooling after condenser"
)
if T_ambient is None:
T_ambient = T_eva_in
self.set(
name="T_ambient",
value=T_ambient,
unit="K",
description="Ambient temperature of machine"
)
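# NOTE (illustrative usage, not from the repository): constructing the boundary conditions for
# a steady-state calculation. Temperatures are absolute (Kelvin) and all numbers are invented
# for the example; T_ambient falls back to T_eva_in when it is not given.
from vclibpy import Inputs

inputs = Inputs(
    n=0.6,                     # 60 % relative compressor speed
    T_eva_in=273.15 + 2,       # secondary fluid entering the evaporator at 2 degC
    T_con_in=273.15 + 30,      # secondary fluid entering the condenser at 30 degC
    m_flow_eva=0.9,            # kg/s
    m_flow_con=0.25,           # kg/s
    dT_eva_superheating=5,     # K
    dT_con_subcooling=2,       # K
)
print(inputs.get_variables()["T_ambient"].value)  # 275.15 -> defaulted to T_eva_in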
# Path: vclibpy/flowsheets/base.py
class BaseCycle:
"""
Base class for a heat pump. More complex systems may inherit from this class
All HP have a compressor, two HE and a source and sink.
Therefore, the parameters defined here are general parameters.
Args:
fluid (str): Name of the fluid
evaporator (HeatExchanger): Instance of a heat exchanger used for the evaporator
condenser (HeatExchanger): Instance of a heat exchanger used for the condenser
"""
    flowsheet_name: str = "Base class of all HP classes - not to use for map generation"
def __init__(
self,
fluid: str,
evaporator: HeatExchanger,
condenser: HeatExchanger
):
self.fluid: str = fluid
self.evaporator = evaporator
self.condenser = condenser
# Instantiate dummy values
self.med_prop = None
self._p_min = 10000 # So that p>0 at all times
self._p_max = None # Is set by med-prop
def __str__(self):
return self.flowsheet_name
def setup_new_fluid(self, fluid):
# Only do so if new fluid is given
if self.med_prop is not None:
if self.med_prop.fluid_name == fluid:
return
self.med_prop.terminate()
# Else create new instance of MedProp
med_prop_class, med_prop_kwargs = media.get_global_med_prop_and_kwargs()
self.med_prop = med_prop_class(fluid_name=fluid, **med_prop_kwargs)
# Write the instance to the components
for component in self.get_all_components():
component.med_prop = self.med_prop
component.start_secondary_med_prop()
# Get max and min pressure
_, self._p_max, _ = self.med_prop.get_critical_point()
self.fluid = fluid
def terminate(self):
self.med_prop.terminate()
for component in self.get_all_components():
component.terminate_secondary_med_prop()
def get_all_components(self) -> List[BaseComponent]:
return [self.condenser, self.evaporator]
def calc_steady_state(self, inputs: Inputs, fluid: str = None, **kwargs):
"""
Calculate the steady-state performance of a vapor compression cycle
based on given inputs and assumptions.
This function ensures consistent assumptions across different cycles.
It calculates the performance of the heat pump under
specific conditions while adhering to several general assumptions.
General Assumptions:
---------------------
- Isenthalpic expansion valves:
The enthalpy at the inlet equals the enthalpy at the outlet.
- No heat losses in any component:
The heat input to the condenser equals the heat
output of the evaporator plus the power input.
- Input to the evaporator is always in the two-phase region.
- Output of the evaporator and output of the condenser maintain
a constant overheating or subcooling (can be set in Inputs).
Args:
inputs (Inputs):
An instance of the Inputs class containing the
necessary parameters to calculate the flowsheet state.
fluid (str):
The fluid to be used in the calculations.
Required only if 'fluid' is not specified during the object's initialization.
Keyword Arguments:
min_iteration_step (int):
The minimum step size for iterations (default: 1).
save_path_plots (str or None):
The path to save plots (default: None).
If None, no plots are created.
show_iteration (bool):
Whether to display iteration progress (default: False).
T_max (float):
Maximum temperature allowed (default: 273.15 + 150).
use_quick_solver (bool):
Whether to use a quick solver (default: True).
max_err_ntu (float):
Maximum allowable error for the heat exchanger in percent (default: 0.5).
max_err_dT_min (float):
Maximum allowable error for minimum temperature difference in K (default: 0.1).
max_num_iterations (int or None):
Maximum number of iterations allowed (default: None).
Returns:
fs_state (FlowsheetState):
An instance of the FlowsheetState class representing
the calculated state of the vapor compression cycle.
"""
# Settings
min_iteration_step = kwargs.pop("min_iteration_step", 1)
save_path_plots = kwargs.get("save_path_plots", None)
input_name = ";".join([k + "=" + str(np.round(v.value, 3)).replace(".", "_")
for k, v in inputs.get_variables().items()])
show_iteration = kwargs.get("show_iteration", False)
use_quick_solver = kwargs.pop("use_quick_solver", True)
err_ntu = kwargs.pop("max_err_ntu", 0.5)
err_dT_min = kwargs.pop("max_err_dT_min", 0.1)
max_num_iterations = kwargs.pop("max_num_iterations", 1e5)
p_1_history = []
p_2_history = []
if use_quick_solver:
step_p1 = kwargs.get("step_max", 10000)
step_p2 = kwargs.get("step_max", 10000)
else:
step_p1 = min_iteration_step
step_p2 = min_iteration_step
# Setup fluid:
if fluid is None:
fluid = self.fluid
self.setup_new_fluid(fluid)
# First: Iterate with given conditions to get the 4 states and the mass flow rate:
T_1_start = inputs.T_eva_in - inputs.dT_eva_superheating
T_3_start = inputs.T_con_in + inputs.dT_con_subcooling
p_1_start = self.med_prop.calc_state("TQ", T_1_start, 1).p
p_2_start = self.med_prop.calc_state("TQ", T_3_start, 0).p
p_1_next = p_1_start
p_2_next = p_2_start
fs_state = FlowsheetState() # Always log what is happening in the whole flowsheet
fs_state.set(name="Q_con", value=1, unit="W", description="Condenser heat flow rate")
fs_state.set(name="COP", value=0, unit="-", description="Coefficient of performance")
if show_iteration:
fig_iterations, ax_iterations = plt.subplots(2)
num_iterations = 0
while True:
if isinstance(max_num_iterations, (int, float)):
if num_iterations > max_num_iterations:
logger.warning("Maximum number of iterations %s exceeded. Stopping.",
max_num_iterations)
return
if (num_iterations + 1) % (0.1 * max_num_iterations) == 0:
logger.info("Info: %s percent of max_num_iterations %s used",
100 * (num_iterations + 1) / max_num_iterations, max_num_iterations)
p_1 = p_1_next
p_2 = p_2_next
p_1_history.append(p_1)
p_2_history.append(p_2)
if show_iteration:
ax_iterations[0].cla()
ax_iterations[1].cla()
ax_iterations[0].scatter(list(range(len(p_1_history))), p_1_history)
ax_iterations[1].scatter(list(range(len(p_2_history))), p_2_history)
plt.draw()
plt.pause(1e-5)
# Increase counter
num_iterations += 1
# Check critical pressures:
if p_2 >= self._p_max:
if step_p2 == min_iteration_step:
logger.error("Pressure too high. Configuration is infeasible.")
return
p_2_next = p_2 - step_p2
step_p2 /= 10
continue
if p_1 <= self._p_min:
if p_1_next == min_iteration_step:
logger.error("Pressure too low. Configuration is infeasible.")
return
p_1_next = p_1 + step_p1
step_p1 /= 10
continue
# Calculate the states based on the given flowsheet
try:
self.calc_states(p_1, p_2, inputs=inputs, fs_state=fs_state)
except ValueError as err:
logger.error("An error occurred while calculating states. "
"Can't guess next pressures, thus, exiting: %s", err)
return
if save_path_plots is not None and num_iterations == 1 and show_iteration:
self.plot_cycle(save_path=save_path_plots.joinpath(f"{input_name}_initialization.png"), inputs=inputs)
# Check heat exchangers:
error_eva, dT_min_eva = self.evaporator.calc(inputs=inputs, fs_state=fs_state)
if not isinstance(error_eva, float):
print(error_eva)
if error_eva < 0:
p_1_next = p_1 - step_p1
continue
else:
if step_p1 > min_iteration_step:
p_1_next = p_1 + step_p1
step_p1 /= 10
continue
elif error_eva > err_ntu and dT_min_eva > err_dT_min:
step_p1 = 1000
p_1_next = p_1 + step_p1
continue
error_con, dT_min_con = self.condenser.calc(inputs=inputs, fs_state=fs_state)
if error_con < 0:
p_2_next = p_2 + step_p2
continue
else:
if step_p2 > min_iteration_step:
p_2_next = p_2 - step_p2
step_p2 /= 10
continue
elif error_con > err_ntu and dT_min_con > err_dT_min:
p_2_next = p_2 - step_p2
step_p2 = 1000
continue
# If still here, and the values are equal, we may break.
if p_1 == p_1_next and p_2 == p_2_next:
# Check if solution was too far away. If so, jump back
# And decrease the iteration step by factor 10.
if step_p2 > min_iteration_step:
p_2_next = p_2 - step_p2
step_p2 /= 10
continue
if step_p1 > min_iteration_step:
p_1_next = p_1 + step_p1
step_p1 /= 10
continue
logger.info("Breaking: Converged")
break
# Check if values are not converging at all:
p_1_unique = set(p_1_history[-10:])
p_2_unique = set(p_2_history[-10:])
if len(p_1_unique) == 2 and len(p_2_unique) == 2 \
and step_p1 == min_iteration_step and step_p2 == min_iteration_step:
logger.critical("Breaking: not converging at all")
break
if show_iteration:
plt.close(fig_iterations)
# Calculate the heat flow rates for the selected states.
Q_con = self.condenser.calc_Q_flow()
Q_con_outer = self.condenser.calc_secondary_Q_flow(Q_con)
Q_eva = self.evaporator.calc_Q_flow()
Q_eva_outer = self.evaporator.calc_secondary_Q_flow(Q_eva)
self.evaporator.calc(inputs=inputs, fs_state=fs_state)
self.condenser.calc(inputs=inputs, fs_state=fs_state)
P_el = self.calc_electrical_power(fs_state=fs_state, inputs=inputs)
T_con_out = inputs.T_con_in + Q_con_outer / self.condenser.m_flow_secondary_cp
# COP based on P_el and Q_con:
COP_inner = Q_con / P_el
COP_outer = Q_con_outer / P_el
# Calculate carnot quality as a measure of reliability of model:
COP_carnot = (T_con_out / (T_con_out - inputs.T_eva_in))
carnot_quality = COP_inner / COP_carnot
# Calc return temperature:
fs_state.set(
name="P_el", value=P_el, unit="W",
description="Power consumption"
)
fs_state.set(
name="carnot_quality", value=carnot_quality,
unit="-", description="Carnot Quality"
)
fs_state.set(
name="Q_con", value=Q_con, unit="W",
description="Condenser refrigerant heat flow rate"
)
# COP based on P_el and Q_con:
fs_state.set(
name="Q_con_outer", value=Q_con_outer, unit="W",
description="Secondary medium condenser heat flow rate"
)
fs_state.set(
name="Q_eva_outer", value=Q_eva_outer, unit="W",
description="Secondary medium evaporator heat flow rate"
)
fs_state.set(
name="COP", value=COP_inner,
unit="-", description="Coefficient of Performance"
)
fs_state.set(
name="COP_outer", value=COP_outer,
unit="-", description="Outer COP, including heat losses"
)
if save_path_plots is not None:
self.plot_cycle(save_path=save_path_plots.joinpath(f"{input_name}_final_result.png"), inputs=inputs)
return fs_state
@abstractmethod
def get_states_in_order_for_plotting(self):
"""
Function to return all thermodynamic states of cycle
in the correct order for plotting.
Include phase change states to see if your simulation
runs plausible cycles.
Returns:
- List with tuples, first entry being the state and second the mass flow rate
"""
return []
def set_evaporator_outlet_based_on_superheating(self, p_eva: float, inputs: Inputs):
"""
Calculate the outlet state of the evaporator based on
the required degree of superheating.
Args:
p_eva (float): Evaporation pressure
inputs (Inputs): Inputs with superheating level
"""
T_1 = self.med_prop.calc_state("PQ", p_eva, 1).T + inputs.dT_eva_superheating
if inputs.dT_eva_superheating > 0:
self.evaporator.state_outlet = self.med_prop.calc_state("PT", p_eva, T_1)
else:
self.evaporator.state_outlet = self.med_prop.calc_state("PQ", p_eva, 1)
def set_condenser_outlet_based_on_subcooling(self, p_con: float, inputs: Inputs):
"""
        Calculate the outlet state of the condenser based on
        the required degree of subcooling.
Args:
p_con (float): Condensing pressure
            inputs (Inputs): Inputs with subcooling level
"""
T_3 = self.med_prop.calc_state("PQ", p_con, 0).T - inputs.dT_con_subcooling
if inputs.dT_con_subcooling > 0:
self.condenser.state_outlet = self.med_prop.calc_state("PT", p_con, T_3)
else:
self.condenser.state_outlet = self.med_prop.calc_state("PQ", p_con, 0)
def plot_cycle(self, save_path: bool, inputs: Inputs, states: list = None):
"""Function to plot the resulting flowsheet of the steady state config."""
if states is None:
states = self.get_states_in_order_for_plotting()
states.append(states[0]) # Plot full cycle
# Unpack state var:
h_T = np.array([state.h for state in states]) / 1000
T = [state.T - 273.15 for state in states]
p = np.array([state.p for state in states])
h_p = h_T
fig, ax = plt.subplots(2, 1, sharex=True)
ax[0].set_ylabel("$T$ in °C")
ax[1].set_xlabel("$h$ in kJ/kgK")
# Two phase limits
ax[0].plot(
self.med_prop.get_two_phase_limits("h") / 1000,
self.med_prop.get_two_phase_limits("T") - 273.15, color="black"
)
ax[0].plot(h_T, T, color="r", marker="s")
self._plot_secondary_heat_flow_rates(ax=ax[0], inputs=inputs)
ax[1].plot(h_p, np.log(p), marker="s", color="r")
# Two phase limits
ax[1].plot(
self.med_prop.get_two_phase_limits("h") / 1000,
np.log(self.med_prop.get_two_phase_limits("p")),
color="black"
)
plt.plot()
ax[1].set_ylabel("$log(p)$")
ax[1].set_ylim([np.min(np.log(p)) * 0.9, np.max(np.log(p)) * 1.1])
ax[0].set_ylim([np.min(T) - 5, np.max(T) + 5])
ax[1].set_xlim([np.min(h_T) * 0.9, np.max(h_T) * 1.1])
ax[0].set_xlim([np.min(h_T) * 0.9, np.max(h_T) * 1.1])
fig.tight_layout()
fig.savefig(save_path)
plt.close(fig)
def _plot_secondary_heat_flow_rates(self, ax, inputs):
Q_con = self.condenser.calc_Q_flow()
Q_eva = self.evaporator.calc_Q_flow()
delta_H_con = np.array([
self.condenser.state_outlet.h * self.condenser.m_flow,
self.condenser.state_outlet.h * self.condenser.m_flow + Q_con
]) / self.condenser.m_flow
delta_H_eva = np.array([
self.evaporator.state_outlet.h * self.evaporator.m_flow,
self.evaporator.state_outlet.h * self.evaporator.m_flow - Q_eva
]) / self.evaporator.m_flow
self.condenser.m_flow_secondary = inputs.m_flow_con
self.condenser.calc_secondary_cp(T=inputs.T_con_in)
self.evaporator.m_flow_secondary = inputs.m_flow_eva
self.evaporator.calc_secondary_cp(T=inputs.T_eva_in)
ax.plot(delta_H_con / 1000, [
inputs.T_con_in - 273.15,
inputs.T_con_in + Q_con / self.condenser.m_flow_secondary_cp - 273.15
], color="b")
ax.plot(delta_H_eva / 1000, [
inputs.T_eva_in - 273.15,
inputs.T_eva_in - Q_eva / self.evaporator.m_flow_secondary_cp - 273.15
], color="b")
@abstractmethod
def calc_electrical_power(self, inputs: Inputs, fs_state: FlowsheetState):
"""Function to calc the electrical power consumption based on the flowsheet used"""
raise NotImplementedError
@abstractmethod
def calc_states(self, p_1, p_2, inputs: Inputs, fs_state: FlowsheetState):
"""
Function to calculate the states and mass flow rates of the flowsheet
and set these into each component based on the given pressure levels p_1 and p_2.
Args:
p_1 (float):
Lower pressure level. If no pressure losses are assumed,
this equals the evaporation pressure and the compressor inlet pressure.
p_2 (float):
Higher pressure level. If no pressure losses are assumed,
this equals the condensing pressure and the compressor outlet pressure.
inputs (Inputs): Inputs of calculation.
fs_state (FlowsheetState): Flowsheet state to save important variables.
"""
raise NotImplementedError
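# NOTE (worked example, not from the repository): the 'no heat losses' assumption documented in
# calc_steady_state implies Q_con = Q_eva + P_el, which ties the heating and cooling COPs
# together. The numbers are invented for the example.
Q_eva = 8000.0         # W, heat absorbed in the evaporator
P_el = 2500.0          # W, electrical power drawn by the compressor
Q_con = Q_eva + P_el   # W, heat rejected in the condenser (energy balance)
COP_heating = Q_con / P_el   # 4.2
COP_cooling = Q_eva / P_el   # 3.2
assert abs(COP_heating - (COP_cooling + 1.0)) < 1e-9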
# Path: vclibpy/utils/nominal_design.py
import time
import logging
from vclibpy import Inputs
from vclibpy.flowsheets import BaseCycle
logger = logging.getLogger(__name__)
def nominal_hp_design(
heat_pump: BaseCycle,
inputs: Inputs,
fluid: str,
dT_con: float = None,
dT_eva: float = None,
| **kwargs) -> dict: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: LQH-is-newbe/gaussian-splatting-regularization
# Path: scene/colmap_loader.py
def read_extrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
cameras = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
camera_id = int(elems[0])
model = elems[1]
assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE"
width = int(elems[2])
height = int(elems[3])
params = np.array(tuple(map(float, elems[4:])))
cameras[camera_id] = Camera(id=camera_id, model=model,
width=width, height=height,
params=params)
return cameras
# Path: scene/colmap_loader.py
def qvec2rotmat(qvec):
return np.array([
[1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
[2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
# Path: scene/colmap_loader.py
def read_extrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
format_char_sequence="ddq"*num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::WriteCamerasBinary(const std::string& path)
void Reconstruction::ReadCamerasBinary(const std::string& path)
"""
cameras = {}
with open(path_to_model_file, "rb") as fid:
num_cameras = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_cameras):
camera_properties = read_next_bytes(
fid, num_bytes=24, format_char_sequence="iiQQ")
camera_id = camera_properties[0]
model_id = camera_properties[1]
model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
width = camera_properties[2]
height = camera_properties[3]
num_params = CAMERA_MODEL_IDS[model_id].num_params
params = read_next_bytes(fid, num_bytes=8*num_params,
format_char_sequence="d"*num_params)
cameras[camera_id] = Camera(id=camera_id,
model=model_name,
width=width,
height=height,
params=np.array(params))
assert len(cameras) == num_cameras
return cameras
# Path: scene/colmap_loader.py
def read_points3D_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
void Reconstruction::WritePoints3DBinary(const std::string& path)
"""
with open(path_to_model_file, "rb") as fid:
num_points = read_next_bytes(fid, 8, "Q")[0]
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
for p_id in range(num_points):
binary_point_line_properties = read_next_bytes(
fid, num_bytes=43, format_char_sequence="QdddBBBd")
xyz = np.array(binary_point_line_properties[1:4])
rgb = np.array(binary_point_line_properties[4:7])
error = np.array(binary_point_line_properties[7])
track_length = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q")[0]
track_elems = read_next_bytes(
fid, num_bytes=8*track_length,
format_char_sequence="ii"*track_length)
xyzs[p_id] = xyz
rgbs[p_id] = rgb
errors[p_id] = error
return xyzs, rgbs, errors
# Path: scene/colmap_loader.py
def read_points3D_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DText(const std::string& path)
void Reconstruction::WritePoints3DText(const std::string& path)
"""
xyzs = None
rgbs = None
errors = None
num_points = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
num_points += 1
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
count = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
xyz = np.array(tuple(map(float, elems[1:4])))
rgb = np.array(tuple(map(int, elems[4:7])))
error = np.array(float(elems[7]))
xyzs[count] = xyz
rgbs[count] = rgb
errors[count] = error
count += 1
return xyzs, rgbs, errors
# Path: utils/graphics_utils.py
def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
Rt = np.zeros((4, 4))
Rt[:3, :3] = R.transpose()
Rt[:3, 3] = t
Rt[3, 3] = 1.0
C2W = np.linalg.inv(Rt)
cam_center = C2W[:3, 3]
cam_center = (cam_center + translate) * scale
C2W[:3, 3] = cam_center
Rt = np.linalg.inv(C2W)
return np.float32(Rt)
# Path: utils/graphics_utils.py
def focal2fov(focal, pixels):
return 2*math.atan(pixels/(2*focal))
# Path: utils/graphics_utils.py
def fov2focal(fov, pixels):
return pixels / (2 * math.tan(fov / 2))
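# NOTE (worked example, not from the repository): focal2fov/fov2focal implement the pinhole
# relation focal = pixels / (2 * tan(fov / 2)); chaining them converts a horizontal field of
# view into the vertical one for a given aspect ratio, as done later in
# readCamerasFromTransforms. Resolution and angle are invented for the example.
import math

fovx = math.radians(60.0)                   # horizontal field of view
width, height = 1920, 1080
focal = width / (2 * math.tan(fovx / 2))    # fov2focal
fovy = 2 * math.atan(height / (2 * focal))  # focal2fov
print(round(math.degrees(fovy), 1))         # ~36.0 degrees for a 16:9 image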
# Path: utils/sh_utils.py
def SH2RGB(sh):
return sh * C0 + 0.5
# Path: scene/gaussian_model.py
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
def __init__(self, sh_degree : int):
def capture(self):
def restore(self, model_args, training_args):
def get_scaling(self):
def get_rotation(self):
def get_xyz(self):
def get_features(self):
def get_opacity(self):
def get_covariance(self, scaling_modifier = 1):
def oneupSHdegree(self):
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
def training_setup(self, training_args):
def update_learning_rate(self, iteration):
def construct_list_of_attributes(self):
def save_ply(self, path):
def reset_opacity(self):
def load_ply(self, path):
def replace_tensor_to_optimizer(self, tensor, name):
def _prune_optimizer(self, mask):
def prune_points(self, mask):
def cat_tensors_to_optimizer(self, tensors_dict):
def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
def densify_and_clone(self, grads, grad_threshold, scene_extent):
def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
def add_densification_stats(self, viewspace_point_tensor, update_filter, denom_acc=1):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
# Path: scene/dataset_readers.py
import os
import sys
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=2):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
reading_dir = "images" if images == None else images
cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
if eval:
# train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
# test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
train_cam_infos = [c for idx, c in enumerate(cam_infos) if c.image_name.startswith("train")]
test_cam_infos = [c for idx, c in enumerate(cam_infos) if c.image_name.startswith("test")]
else:
train_cam_infos = cam_infos
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "sparse/0/points3D.ply")
bin_path = os.path.join(path, "sparse/0/points3D.bin")
txt_path = os.path.join(path, "sparse/0/points3D.txt")
if not os.path.exists(ply_path):
print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
try:
xyz, rgb, _ = read_points3D_binary(bin_path)
except:
xyz, rgb, _ = read_points3D_text(txt_path)
storePly(ply_path, xyz, rgb)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
for idx, frame in enumerate(frames):
cam_name = os.path.join(path, frame["file_path"] + extension)
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image_path = os.path.join(path, cam_name)
image_name = Path(cam_name).stem
image = Image.open(image_path)
im_data = np.array(image.convert("RGBA"))
bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
FovY = fovy
| FovX = fovx |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: malcolmchetwyn/realestate_investment_app
# Path: investment_finder.py
def real_estate_investment_analysis(purchase_price, renovation_cost, loan_amount,
interest_rate, loan_term, rental_income,
operating_expenses, vacancy_rate, appreciation_rate,
sale_year, location_score, market_growth_rate):
# Validating input data
if purchase_price <= 0 or loan_amount <= 0 or rental_income <= 0:
return "Invalid input data"
monthly_interest_rate = interest_rate / 12 / 100
number_of_payments = loan_term * 12
# Monthly mortgage payment calculation
monthly_mortgage = loan_amount * (monthly_interest_rate * (1 + monthly_interest_rate)**number_of_payments) / ((1 + monthly_interest_rate)**number_of_payments - 1)
yearly_cash_flows, property_values, remaining_loan_balances = [], [], []
current_loan_balance = loan_amount
property_value = purchase_price + renovation_cost
# Stamp duty and total initial investment calculation
stamp_duty = calculate_stamp_duty(purchase_price)
total_investment = purchase_price + renovation_cost + stamp_duty
capital_investment_required = total_investment - loan_amount
for year in range(1, sale_year + 1):
property_value *= (1 + (appreciation_rate + market_growth_rate) / 100)
adjusted_rental_income = rental_income * 12 * (1 - vacancy_rate / 100)
net_operating_income = adjusted_rental_income - (operating_expenses * 12)
annual_mortgage_payment = monthly_mortgage * 12
cash_flow = net_operating_income - annual_mortgage_payment
yearly_cash_flows.append(cash_flow)
property_values.append(property_value)
interest_for_year = current_loan_balance * interest_rate / 100
principal_paid_for_year = annual_mortgage_payment - interest_for_year
current_loan_balance -= principal_paid_for_year
remaining_loan_balances.append(max(current_loan_balance, 0))
total_cash_flow = sum(yearly_cash_flows)
sale_proceeds = property_values[-1] - remaining_loan_balances[-1]
roi = ((sale_proceeds + total_cash_flow - total_investment) / total_investment) * 100 * location_score
cash_on_cash_return = (yearly_cash_flows[0] / capital_investment_required) * 100
# Corrected DSCR, GRM, BEP calculations
annual_rental_income = rental_income * 12
dscr = net_operating_income / annual_mortgage_payment if annual_mortgage_payment > 0 else float('-inf')
grm = purchase_price / annual_rental_income if annual_rental_income > 0 else float('inf')
bep = (operating_expenses * 12) / adjusted_rental_income if adjusted_rental_income > 0 else float('inf')
# Valid investment check
valid_investment = dscr > 1 and grm > 0 and bep >= 0 and bep <= 1
# Investment score based on normalized metrics
normalized_dscr = (dscr - 1) if dscr > 1 else 0
normalized_grm = 1 / grm if grm > 0 else 0
normalized_bep = 1 - bep if bep >= 0 and bep <= 1 else 0
investment_score = (normalized_dscr + normalized_grm + normalized_bep) / 3 * 10 if valid_investment else 0
return {
"Yearly Cash Flows": yearly_cash_flows,
"Property Values": property_values,
"Remaining Loan Balances": remaining_loan_balances,
"Sale Proceeds": sale_proceeds,
"ROI (%)": roi,
"Cash on Cash Return (%)": cash_on_cash_return,
"Stamp Duty": stamp_duty,
"Total Investment": total_investment,
"Capital Investment Required": capital_investment_required,
"Interest Rate": interest_rate,
"Renovation Cost": renovation_cost,
"Operating Expenses": operating_expenses,
"DSCR": dscr,
"GRM": grm,
"BEP": bep,
"Investment Score": investment_score,
"Valid Investment": valid_investment
}
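# NOTE (worked example, not from the repository): the monthly payment above is the standard
# annuity/amortisation formula M = L * r * (1 + r)**n / ((1 + r)**n - 1). The loan figures are
# invented for the example.
loan_amount = 500_000.0
interest_rate = 6.0                # % p.a., same convention as the function arguments
loan_term = 30                     # years
r = interest_rate / 12 / 100       # monthly interest rate
n = loan_term * 12                 # number of monthly payments
monthly_mortgage = loan_amount * (r * (1 + r) ** n) / ((1 + r) ** n - 1)
print(round(monthly_mortgage, 2))  # ~2997.75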
# Path: investment_finder.py
def calculate_additional_metrics_and_score(results):
# Extract necessary values from the results dictionary
renovation_cost = results["Renovation Cost"]
purchase_price = results["Total Investment"] - results["Stamp Duty"] - renovation_cost
loan_amount = results["Remaining Loan Balances"][0]
interest_rate = results["Interest Rate"]
operating_expenses = results["Operating Expenses"]
annual_rental_income = results["Yearly Cash Flows"][0] + (loan_amount * (interest_rate / 12 / 100) * 12)
dscr = results["DSCR"]
grm = results["GRM"]
bep = results["BEP"]
# Adjusting weights for DSCR, GRM, and BEP
weight_dscr = 0.5 # Assuming DSCR is most critical
weight_grm = 0.3
weight_bep = 0.2
# Assuming valid investment criteria are met
valid_investment = dscr > 1 and grm > 0 and bep >= 0 and bep <= 1
# Updated normalization and scoring
normalized_dscr = min((results["DSCR"] - 1) / (5 - 1), 1) if results["DSCR"] > 1 else 0
normalized_grm = min((12 - results["GRM"]) / (12 - 1), 1) if results["GRM"] >= 1 and results["GRM"] <= 12 else 0
normalized_bep = 1 - results["BEP"] if results["BEP"] >= 0 and results["BEP"] <= 1 else 0
# Weighted score calculation
investment_score = (normalized_dscr * weight_dscr + normalized_grm * weight_grm + normalized_bep * weight_bep) * 10
investment_score = min(investment_score, 10)
return {
"DSCR": dscr,
"GRM": grm,
"BEP": bep,
"Investment Score": investment_score,
"Valid Investment": valid_investment
}
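# NOTE (worked example, not from the repository): how the weighted score above combines the
# three ratios. The input ratios are invented for the example.
dscr, grm, bep = 1.5, 8.0, 0.4
norm_dscr = min((dscr - 1) / (5 - 1), 1)   # 0.125
norm_grm = min((12 - grm) / (12 - 1), 1)   # ~0.364
norm_bep = 1 - bep                         # 0.6
score = (norm_dscr * 0.5 + norm_grm * 0.3 + norm_bep * 0.2) * 10
print(round(score, 2))                     # ~2.92 out of 10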
# Path: get_rental_value.py
def submit_address_and_get_data(url, address):
data = {}
normalized_address = normalize_address(address) if address_needs_normalization(address) else address
with webdriver.Chrome(service=Service(ChromeDriverManager().install())) as driver:
driver.get(url)
# Handle cookie consent or any pop-ups
try:
cookie_consent_button = WebDriverWait(driver, 10).until(
EC.element_to_be_clickable((By.CSS_SELECTOR, "button[class*='accept-cookies'],button[id*='cookie'],div[id*='cookie']"))
)
cookie_consent_button.click()
except Exception:
pass # No cookie consent or pop-up found
address_field = driver.find_element(By.ID, "da_optional_address_text")
address_field.send_keys(normalized_address)
WebDriverWait(driver, 10).until(
EC.visibility_of_element_located((By.ID, "da_optional_address_suggest"))
)
suggestion_box = driver.find_element(By.ID, "da_optional_address_suggest")
if "Address/Suburb not found" in suggestion_box.text:
data["Error"] = "Address not found. Please check the address and try again."
return data
suggestions = suggestion_box.find_elements(By.CLASS_NAME, "da_optional_address_item")
if suggestions:
driver.execute_script("arguments[0].click();", suggestions[0])
submit_button = driver.find_element(By.ID, "da_optional_address_button")
submit_button.click()
# Wait for the new page to load
time.sleep(5)
cards = driver.find_elements(By.CLASS_NAME, "da-card")
for card in cards:
title_parts = card.find_elements(By.CLASS_NAME, "da-card-title")
card_title = " ".join([part.text.strip() for part in title_parts])
card_value = card.find_element(By.CLASS_NAME, "da-card-value").text.strip() if card.find_elements(By.CLASS_NAME, "da-card-value") else "N/A"
card_footer = card.find_element(By.CLASS_NAME, "da-card-footer").text.strip() if card.find_elements(By.CLASS_NAME, "da-card-footer") else "N/A"
formatted_title = ' '.join(card_title.split())
data[formatted_title] = {"Value": card_value, "Footer": card_footer}
return data
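# --- Hedged usage sketch (editor's addition): the URL and address below are
# placeholders, not real endpoints; the real target is whichever report page
# exposes the "da_optional_address_text" widget driven above.
if __name__ == "__main__":
    _demo_url = "https://example.com/rental-report"       # placeholder
    _demo_address = "1 Example Street, Sydney NSW 2000"   # placeholder
    for _title, _card in submit_address_and_get_data(_demo_url, _demo_address).items():
        print(_title, "->", _card)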
# Path: get_sale_history_2.py
def get_appreciate_rate(address):
with webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options) as driver:
driver.get("https://www.domain.com.au/property-profile")
# Wait for the popup banner to appear
popup_banner = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR, 'section[data-testid="popupbanner-wrapper"]'))
)
# Find the close button and click it
close_button = popup_banner.find_element(By.CSS_SELECTOR, 'button[data-testid="popupbanner-wrapper__close-cta"]')
close_button.click()
# Wait for the input field to be clickable
WebDriverWait(driver, 10).until(
EC.element_to_be_clickable((By.CSS_SELECTOR, 'input[type="search"]'))
)
# Find the input field and send the address
input_field = driver.find_element(By.CSS_SELECTOR, 'input[type="search"]')
input_field.send_keys(address)
# Press the space bar to trigger the list
input_field.send_keys(Keys.SPACE)
# Wait for the results to load (you might need to adjust the waiting condition here)
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, 'downshift-0-item-0'))
)
# Click on the first record to make it work
first_record = driver.find_element(By.ID, 'downshift-0-item-0')
first_record.click()
# Extracting property entries
try:
mid_value_text = None
try:
# Locating all containers that might contain the 'Mid' value
mid_containers = driver.find_elements(By.XPATH, "//div[contains(@class, 'css-8nlvsz')][div[text()='Mid']]")
# Iterate through each container found
for container in mid_containers:
currency_element = container.find_element(By.XPATH, ".//div[@data-testid='currency']")
mid_value_text = currency_element.text
break
if mid_value_text is not None:
print(f"Mid Value Text: {mid_value_text}")
else:
print("Mid value text not found.")
except Exception as e:
print(f"An error occurred: {e}")
if mid_value_text is not None:
#mid_price_text = mid_price_element.text
mid_price = convert_price(mid_value_text)
# print(f"Mid price: ${mid_value_text}")
# Current year and the target year (5 years ago)
current_year = datetime.now().year
target_year = current_year - 5
click_view_more_results(driver)
# Waiting for property entries to load
property_entries = WebDriverWait(driver, 10).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'li.css-16ezjtx'))
)
# Initialize variables
closest_sale_year = None
closest_sale_price = None
smallest_year_difference = float('inf')
# Iterate through each property entry to extract the information
for entry in property_entries:
category_elements = entry.find_elements(By.CSS_SELECTOR, 'div[data-testid="fe-co-property-timeline-card-category"]')
# Skip entry if category element is not found
if not category_elements:
continue
category_text = category_elements[0].text
# Processing if category is 'SOLD'
if category_text == 'SOLD':
year = int(entry.find_element(By.CSS_SELECTOR, 'div.css-1qi20sy').text)
price_text = entry.find_element(By.CSS_SELECTOR, 'span.css-b27lqk').text
price = convert_price(price_text)
year_difference = abs(year - target_year)
if year_difference < smallest_year_difference:
smallest_year_difference = year_difference
closest_sale_year = year
closest_sale_price = price
if closest_sale_price is not None:
print(f"Selected Sale: Year - {closest_sale_year}, Price - ${closest_sale_price}")
years_difference = current_year - closest_sale_year
if years_difference > 0:
appreciation_rate = calculate_yearly_appreciation_rate(closest_sale_price, mid_price, years_difference)
formatted_rate = round(appreciation_rate, 2)
return {"appreciation_rate": formatted_rate, "property_mid_price": mid_price}
else:
print("No appreciation calculation due to same year sale.")
return {"appreciation_rate": get_average_appreciation_rate(), "property_mid_price": mid_price}
else:
print("No suitable sale found within 5 years.")
return {"appreciation_rate": get_average_appreciation_rate(), "property_mid_price": mid_price}
except Exception as e:
print("An error occurred:", e)
mid_price = None
return {"appreciation_rate": get_average_appreciation_rate(), "property_mid_price": mid_price}
# Path: process_properties.py
import re
import time
import random
import sys
import numpy as np
import numpy_financial as npf
from investment_finder import real_estate_investment_analysis, calculate_additional_metrics_and_score
from get_rental_value import submit_address_and_get_data
from get_sale_history_2 import get_appreciate_rate
investment_amount_ability = 50000
def extract_median_asking_rent(data):
for key, value in data.items():
if "MEDIAN ASKING RENT" in key:
rent_value = value.get("Value", "")
rent_numbers = re.findall(r'\d+', rent_value)
return ''.join(rent_numbers) if rent_numbers else "Data not available"
return "Data not available"
def write_investment_details_to_file(address, url, investment_score, file_path="good_investments.txt"):
with open(file_path, "a") as file:
file.write(f"Address: {address}, URL: {url}, Investment Score: {investment_score}\n")
def calculate_break_even(cash_flows):
cumulative_cash_flow = np.cumsum(cash_flows)
break_even_year = np.where(cumulative_cash_flow >= 0)[0]
return break_even_year[0] + 1 if break_even_year.size > 0 else None
def calculate_roi_and_irr(total_investment, cash_flows, sale_proceeds):
total_cash_flow = sum(cash_flows) + sale_proceeds
roi = ((total_cash_flow - total_investment) / total_investment) * 100
# Prepend the initial outlay (negative) and append the sale proceeds for the IRR calculation
irr_cash_flows = [-total_investment] + cash_flows + [sale_proceeds]
irr = npf.irr(irr_cash_flows) * 100
| return roi, irr if not np.isnan(irr) else None |
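# --- Hedged worked example (editor's addition): invented numbers showing what
# calculate_roi_and_irr returns. With a 100k outlay, five yearly cash flows of
# 8k and 120k sale proceeds, total cash back is 160k, so ROI = 60%; the IRR is
# the rate r solving -100000 + sum_{t=1..5} 8000/(1+r)^t + 120000/(1+r)^6 = 0,
# a little under 10% per year here (npf.irr returns a fraction, scaled to percent above).
if __name__ == "__main__":
    _roi, _irr = calculate_roi_and_irr(100000, [8000] * 5, 120000)
    print(_roi, _irr)  # 60.0 and roughly 9.5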
====REPOSITORY====
# Repo Name: soumik-kanad/diffssl
# Path: ssl_diff/ssl_model_feedback_fusion.py
class DiffSSLModelFeedbackFusion(nn.Module):
""" SSL model with feedback loop between the decoder features of the UNet and the
encoder features of the UNet
"""
def __init__(self, encoder, diffusion, head, device, mode='freeze',
feedback_arch="C_B_R_C", use_feedback=False, feedback_b_list=None, first_fw_b_list=None, second_fw_b_list=None):
super().__init__()
self.encoder = encoder
self.diffusion = diffusion
self.head = head
self.use_feedback = use_feedback
self.feedback_b_list = feedback_b_list
self.first_fw_b_list = first_fw_b_list
self.second_fw_b_list = second_fw_b_list
self.mode = mode
assert self.mode in ['freeze', 'update', 'mult_fpn', 'add_fpn', 'multi_scale_freeze', "finetune"], f"Mode {self.mode} not supported"
if self.mode == 'freeze' and not use_feedback:
for param in self.encoder.parameters():
param.requires_grad = False
else:
# including fusion finetune, feedback finetune, feedback freeze
# print("=======Freezed param=======")
frozen_decoder_idx = max(self.first_fw_b_list + self.second_fw_b_list) - 19 # -19 to convert block idx to decoder idx
for name, param in self.encoder.named_parameters():
if name.startswith("out."):
param.requires_grad = False
# print(name)
elif name.startswith("output_blocks"):
if int(name.split(".")[1]) >= frozen_decoder_idx:
param.requires_grad = False
# print(name)
self.device = device
if use_feedback:
"""
generate feedback layers
Feedback Architecture: feedback_arch = "C_B_R_C" = Conv, BN, ReLU, Conv
"""
feedback_layers = []
for feedback_b in self.feedback_b_list:
in_dim = DM_FEAT_DIM_DICT[feedback_b]
out_dim = DM_FEAT_DIM_DICT[38-feedback_b]
sequential_model_lst = self.make_layers(feedback_arch, in_dim, out_dim)
feedback_layers.append(nn.Sequential(*sequential_model_lst))
self.feedback_layers = nn.ModuleList(feedback_layers)
def make_layers(self, feedback_arch, in_dim, out_dim):
sequential_model_lst = []
for j in range(len(feedback_arch)):
if feedback_arch[j] == "Res":
""" Use first block to change in_dim to out_dim and then the rest operate on out_dim """
if j == 0: # if the first resblock
sequential_model_lst.append(ResBlock(in_dim, dropout=0.0, out_channels=out_dim, use_conv=False))
else: # any resblock after the first
sequential_model_lst.append(ResBlock(out_dim, dropout=0.0, out_channels=out_dim, use_conv=False))
elif feedback_arch[j] == "R":
sequential_model_lst.append(nn.ReLU(inplace=True))
elif feedback_arch[j] == "B":
sequential_model_lst.append(nn.BatchNorm2d(out_dim))
elif feedback_arch[j] == "C":
""" Use first conv to change in_dim to out_dim and then the rest operate on out_dim """
if j == 0:
sequential_model_lst.append(nn.Conv2d(in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=False))
else:
sequential_model_lst.append(nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1, groups=out_dim, bias=False))
elif feedback_arch[j] == "C2":
""" Operate on in_dim the entire time and then for the last conv, change in_dim to out_dim """
if j == len(feedback_arch) - 1:
sequential_model_lst.append(nn.Conv2d(in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=False))
else:
sequential_model_lst.append(nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1, groups=in_dim, bias=False))
elif feedback_arch[j] == "S":
sequential_model_lst.append(nn.SiLU(inplace=True))
elif feedback_arch[j] == "G":
# want to use this as a group norm layer
sequential_model_lst.append(nn.GroupNorm(num_groups=32, num_channels=out_dim, dtype=self.encoder.dtype))
return sequential_model_lst
def generate_feedback(self, features):
""" generate feedback features from decoder features """
feedback_features = []
for idx, b in enumerate(self.feedback_b_list):
feedback_features.append(self.feedback_layers[idx](features[b-1]))
return feedback_features
def forward(self, x, t, unet_model_kwargs={}):
first_fw_feat = []
second_fw_feat = []
for t_ in t:
t_ = t_*torch.ones(x.shape[0],).long().to(self.device)
x_start = x.to(self.device)
x_start = x_start.type(torch.float16) if self.use_fp16 else x_start.type(torch.float32)
noise = torch.randn_like(x_start)
x_t = self.diffusion.q_sample(x_start, t_, noise=noise)
# encoder_features = self.encoder.get_encoder_features(x_t, self.diffusion._scale_timesteps(t), **unet_model_kwargs)
# print([x.shape for x in encoder_features])
""" extract encoder features and decoder features depending on the mode """
if self.use_feedback:
with torch.no_grad():
# TODO : getting all features wastes GPU memory
encoder_features, _, mid_feature, decoder_features = self.encoder.get_all_features(x_t,
self.diffusion._scale_timesteps(t_), 0,
['encoder_features', 'resume_encoder_feature', 'mid_feature', 'decoder_features'],
**unet_model_kwargs)
features = encoder_features + mid_feature + decoder_features
first_fw_feat.append([features[b-1].detach().float() for b in self.first_fw_b_list])
else:
block_feat_lst = self.encoder.get_multiple_features(x_t,
self.diffusion._scale_timesteps(t_),
block_num_lst = self.first_fw_b_list,
**unet_model_kwargs)
first_fw_feat.append([block_feat.float() for block_feat in block_feat_lst])
if self.use_feedback: # use feedback
""" generate feedback features from decoder features """
feedback_features = self.generate_feedback(features)
feedback_features = feedback_features[::-1] # reverse the list of feedback features
""" generate the final features based on the mode """
block_feat_list = self.encoder.get_multiple_features_with_specified_feedback(x=x_t,
timesteps=self.diffusion._scale_timesteps(t_),
block_num_lst=self.second_fw_b_list,
feedback_features=feedback_features, # list of features [0: len(input_blocks) - feedback_starting_point]
feedback_b_list=self.feedback_b_list,
**unet_model_kwargs)
second_fw_feat.append([block_feat.float() for block_feat in block_feat_list])
x = self.head(self.first_fw_b_list, first_fw_feat, self.second_fw_b_list, second_fw_feat, t)
return x
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.use_fp16 = True
self.encoder.convert_to_fp16()
if self.use_feedback:
for idx in range(len(self.feedback_b_list)):
self.feedback_layers[idx].apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.use_fp16 = False
self.encoder.convert_to_fp32()
if self.use_feedback:
for idx in range(len(self.feedback_b_list)):
self.feedback_layers[idx].apply(convert_module_to_f32)
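# --- Hedged sketch (editor's addition): what one "C_B_R_C" feedback adapter
# built by make_layers above looks like for concrete channel sizes. The dims
# (512 -> 256) are invented; in the model they come from DM_FEAT_DIM_DICT for
# the chosen decoder block b and its mirrored encoder block 38 - b.
import torch
import torch.nn as nn

_in_dim, _out_dim = 512, 256
_feedback_adapter = nn.Sequential(
    nn.Conv2d(_in_dim, _out_dim, kernel_size=1, bias=False),   # "C": first conv changes the channel count
    nn.BatchNorm2d(_out_dim),                                  # "B"
    nn.ReLU(inplace=True),                                     # "R"
    nn.Conv2d(_out_dim, _out_dim, kernel_size=3, padding=1,
              groups=_out_dim, bias=False),                    # "C": depthwise 3x3 refinement
)
_decoder_feat = torch.randn(2, _in_dim, 16, 16)
_encoder_like = _feedback_adapter(_decoder_feat)               # shape: (2, 256, 16, 16)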
# Path: ssl_diff/head.py
class Head(nn.Module):
def __init__(self, args, feature_dim_dict, feature_size_dict):
super().__init__()
self.fcs = nn.ModuleList()
self.pool = nn.AdaptiveAvgPool2d(args.pre_pool_size)
feature_dims = feature_dim_dict[args.first_fw_b_list[0]] * args.pre_pool_size * args.pre_pool_size
if args.head_arc == '':
self.fcs.append(nn.Linear(feature_dims, args.num_classes))
else:
if '_' in args.head_arc:
hidden_dims = args.head_arc.split('_')
self.fcs.append(nn.Linear(feature_dims, int(hidden_dims[0])))
last_hidden = int(hidden_dims[0])
for hidden_dim in hidden_dims[1:]:
self.fcs.append(nn.Linear(last_hidden, int(hidden_dim)))
last_hidden = int(hidden_dim)
self.fcs.append(nn.Linear(last_hidden, args.num_classes))
else:
self.fcs.append(nn.Linear(feature_dims, int(args.head_arc)))
self.fcs.append(nn.Linear(int(args.head_arc), args.num_classes))
def forward(self, first_fw_b_list, first_fw_feat, second_fw_b_list, second_fw_feat, t_list):
x = first_fw_feat[0][0]
x = self.pool(x)
x = torch.flatten(x, start_dim=1)
if len(self.fcs) == 1:
return self.fcs[0](x)
else:
for fc in self.fcs[:-1]:
x = nn.functional.relu(fc(x))
return self.fcs[-1](x)
# Path: ssl_diff/attention_fusion.py
class AttentionFusion(nn.Module):
def __init__(self, args, feature_dim_dict, fature_size_dict=None):
super(AttentionFusion, self).__init__()
attention_dims = int(args.fusion_arc.split(',')[0].strip().split(':')[2])
pre_layer = {}
for b in set(args.first_fw_b_list + args.second_fw_b_list):
feat_size = min(fature_size_dict[b], args.pre_pool_size)
norm = nn.BatchNorm2d(feature_dim_dict[b]) if args.norm_type == "batch" else nn.LayerNorm([feature_dim_dict[b], feat_size, feat_size])
pre_layer[str(b)] = nn.Sequential(
nn.AdaptiveAvgPool2d(feat_size),
norm,
nn.Conv2d(feature_dim_dict[b], attention_dims, 1),
LambdaLayer(lambda x: rearrange(x, 'b c h w -> b (h w) c')),
)
self.pre_layer = nn.ModuleDict(pre_layer)
self.intra_inter_block_attention = AttentionHead(args.fusion_arc.split("/")[0])
self.feature_dims = attention_dims * len(args.t_list)
self.head = nn.Linear(self.feature_dims, args.num_classes)
def forward(self, first_fw_b_list, first_fw_feat, second_fw_b_list, second_fw_feat, t_list):
if t_list is None: t_list = [0] # for other than Diffusion Model
inter_noise_step_feat = []
for t_idx, t in enumerate(t_list):
block_feat = []
for b_idx, b in enumerate(first_fw_b_list):
x = self.pre_layer[str(b)](first_fw_feat[t_idx][b_idx])
block_feat.append(x)
for b_idx, b in enumerate(second_fw_b_list):
x = self.pre_layer[str(b)](second_fw_feat[t_idx][b_idx])
block_feat.append(x)
x = torch.concat(block_feat, dim=1)
# print("DEBUG: intra_inter_block_feat.in.shape", x.shape)
x = self.intra_inter_block_attention(x)
# print("DEBUG: intra_inter_block_feat.out.shape", x.shape)
inter_noise_step_feat.append(x)
x = torch.concat(inter_noise_step_feat, dim=1)
# print("DEBUG: inter_noise_feat.shape", x.shape)
x = self.head(x)
return x
# Path: ssl_diff/const.py
DM_FEAT_DIM_DICT = {}
# Path: ssl_diff/const.py
DM_FEAT_SIZE_DICT = {}
# Path: guided_diffusion/image_datasets.py
def load_data(
*,
data_dir,
batch_size,
image_size,
class_cond=False,
deterministic=False,
random_crop=False,
random_flip=True,
):
"""
For a dataset, create a generator over (images, kwargs) pairs.
Each images is an NCHW float tensor, and the kwargs dict contains zero or
more keys, each of which map to a batched Tensor of their own.
The kwargs dict can be used for class labels, in which case the key is "y"
and the values are integer tensors of class labels.
:param data_dir: a dataset directory.
:param batch_size: the batch size of each returned pair.
:param image_size: the size to which images are resized.
:param class_cond: if True, include a "y" key in returned dicts for class
label. If classes are not available and this is true, an
exception will be raised.
:param deterministic: if True, yield results in a deterministic order.
:param random_crop: if True, randomly crop the images for augmentation.
:param random_flip: if True, randomly flip the images for augmentation.
"""
if not data_dir:
raise ValueError("unspecified data directory")
all_files = _list_image_files_recursively(data_dir)
classes = None
if class_cond:
# Assume classes are the first part of the filename,
# before an underscore.
# class_names = [bf.basename(path).split("_")[0] for path in all_files]
# Assume classes are the parent directory name
class_names = [bf.basename(bf.dirname(path)) for path in all_files]
sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}
classes = [sorted_classes[x] for x in class_names]
dataset = ImageDataset(
image_size,
all_files,
classes=classes,
shard=get_rank(),
num_shards=get_world_size(),
random_crop=random_crop,
random_flip=random_flip,
)
if deterministic:
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True
)
else:
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True
)
return loader
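# --- Hedged usage sketch (editor's addition): the directory is a placeholder.
# load_data above returns a plain DataLoader over (image_batch, extra_dict)
# pairs, with integer class labels under extra["y"] when class_cond=True.
if __name__ == "__main__":
    _loader = load_data(
        data_dir="/path/to/imagenet/train",  # placeholder path
        batch_size=8,
        image_size=256,
        class_cond=True,
    )
    _imgs, _extra = next(iter(_loader))
    print(_imgs.shape, _extra["y"].shape)  # e.g. [8, 3, 256, 256] and [8]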
# Path: guided_diffusion/dist_util.py
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
def setup_dist():
def is_main_process():
def get_world_size():
def get_rank():
def dev():
def load_state_dict(path, **kwargs):
def sync_params(params):
def _find_free_port():
# Path: guided_diffusion/resample.py
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
# Path: guided_diffusion/script_util.py
def model_and_diffusion_defaults():
"""
Defaults for image training.
"""
res = model_defaults()
res.update(diffusion_defaults())
return res
# Path: guided_diffusion/script_util.py
def create_model_and_diffusion(
image_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
channel_mult,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
use_new_attention_order,
):
model = create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult=channel_mult,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
use_new_attention_order=use_new_attention_order,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return model, diffusion
# Path: guided_diffusion/script_util.py
def args_to_dict(args, keys):
return {k: getattr(args, k) for k in keys}
# Path: guided_diffusion/script_util.py
def add_dict_to_argparser(parser, default_dict):
for k, v in default_dict.items():
v_type = type(v)
if v is None:
v_type = str
elif isinstance(v, bool):
v_type = str2bool
parser.add_argument(f"--{k}", default=v, type=v_type)
# Path: finetune.py
import argparse
import torch
import numpy as np
import sys
import os
import glob
import torch.distributed as dist
import wandb
from torch.nn.parallel import DistributedDataParallel as DDP
from tqdm import tqdm
from ssl_diff import DiffSSLModelFeedbackFusion
from ssl_diff import AttentionFusion, Head
from ssl_diff import DM_FEAT_DIM_DICT,DM_FEAT_SIZE_DICT
from guided_diffusion.image_datasets import load_data
from guided_diffusion import dist_util #, logger
from guided_diffusion.resample import create_named_schedule_sampler
from guided_diffusion.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
)
if use_feedback:
optimized_params_lst.append({'params': model.feedback_layers.parameters()})
if args.mode == 'update':
optimized_params_lst.append({'params': model.update_blocks.parameters()})
if args.mode == 'add_fpn' or args.mode == 'mult_fpn':
optimized_params_lst.append({'params': model.fpn_blocks.parameters()})
if args.mode == "finetune":
optimized_params_lst.append({'params': model.encoder.parameters()})
optimizer = torch.optim.SGD(optimized_params_lst, lr=lr)
loss_fn = torch.nn.CrossEntropyLoss()
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 7, 0.1)
if checkpoint_dict is not None:
print(f"Loading model, optimizer, and scheduler from checkpoint from!")
model.module.head.load_state_dict(checkpoint_dict['model_head'])
if use_feedback:
print("Loading feedback layers")
model.module.feedback_layers.load_state_dict(checkpoint_dict['model_feedback'])
if args.mode == 'update':
print("Loading update blocks")
model.module.update_blocks.load_state_dict(checkpoint_dict['model_update'])
elif args.mode == 'add_fpn' or args.mode == 'mult_fpn':
print("Loading fpn blocks")
model.module.fpn_blocks.load_state_dict(checkpoint_dict['model_fpn'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
scheduler.load_state_dict(checkpoint_dict['scheduler'])
start_epoch = checkpoint_dict['epoch']
else:
start_epoch = 0
losses = []
model.train()
batch_num = 0
for i in range(start_epoch, e):
for batch in (tqdm(train_dataloader, total=len(train_dataloader))):
# # measure execution time in pytorch
# start = torch.cuda.Event(enable_timing=True)
# end = torch.cuda.Event(enable_timing=True)
# start.record()
imgs, extra = batch #next(train_dataloader)
imgs = imgs.to(dist_util.dev())
targets = extra["y"].to(dist_util.dev())
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Inputs: ", start.elapsed_time(end))
# start.record()
output = model(imgs, args.t_list)
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Forward: ", start.elapsed_time(end))
# start.record()
#calculate loss
loss = loss_fn(output, targets)
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Loss: ", start.elapsed_time(end))
#backprop
# start.record()
optimizer.zero_grad()
loss.backward()
# store 'module.encoder.time_embed.0.bias' weight
# import pdb; pdb.set_trace()
# print(old - model.module.encoder.time_embed[0].bias.clone().detach())
# old = model.module.encoder.time_embed[0].bias.clone().detach()
optimizer.step()
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Backward: ", start.elapsed_time(end))
# start.record()
if len(losses) == 100:
losses = losses[1:]
losses.append(loss.item())
if dist_util.is_main_process():
if (batch_num + 1) % 100 == 0:
print(f'Epoch: {i+1}/{e}, Batch Num: {batch_num+1}: Loss: {np.mean(losses):0.6f}', flush=True)
if args.use_wandb:
wandb.log({"Loss/train": np.mean(losses), "epoch": (batch_num+1) / len(train_dataloader)})
batch_num += 1
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Logging: ", start.elapsed_time(end))
scheduler.step()
if (i + 1) % args.eval_interval == 0:
test(model, test_dataloader, args, 'Val (Test)', i+1)
# Save checkpoint every epoch
if dist_util.is_main_process():
save_file = os.path.join(args.output_dir, f'epoch_latest.pth')
print(f"Saving checkpoint @ Epoch: {i+1} to {save_file}")
save_dict ={
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'epoch': i+1
}
save_dict['model_head'] = model.module.head.state_dict()
| if use_feedback: |