source (stringlengths 4.8k-15.8k) | file_name (stringlengths 9-9) | cwe (sequencelengths 1-1) |
---|---|---|
"""
Callback handler for storing generation data in OpenInference format.
OpenInference is an open standard for capturing and storing AI model inferences.
It enables production LLM app servers to seamlessly integrate with LLM
observability solutions such as Arize and Phoenix.
For more information on the specification, see
https://github.com/Arize-ai/open-inference-spec
"""
import importlib
import uuid
from dataclasses import dataclass, field, fields
from datetime import datetime
from types import ModuleType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
TypeVar,
)
from llama_index.core.base.llms.types import ChatResponse
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
from llama_index.core.callbacks.schema import CBEventType, EventPayload
if TYPE_CHECKING:
from pandas import DataFrame
OPENINFERENCE_COLUMN_NAME = "openinference_column_name"
Embedding = List[float]
def _generate_random_id() -> str:
"""Generates a random ID.
Returns:
str: A random ID.
"""
return str(uuid.uuid4())
@dataclass
class QueryData:
"""
Query data with column names following the OpenInference specification.
"""
id: str = field(
default_factory=_generate_random_id,
metadata={OPENINFERENCE_COLUMN_NAME: ":id.id:"},
)
timestamp: Optional[str] = field(
default=None, metadata={OPENINFERENCE_COLUMN_NAME: ":timestamp.iso_8601:"}
)
query_text: Optional[str] = field(
default=None,
metadata={OPENINFERENCE_COLUMN_NAME: ":feature.text:prompt"},
)
query_embedding: Optional[Embedding] = field(
default=None,
metadata={OPENINFERENCE_COLUMN_NAME: ":feature.[float].embedding:prompt"},
)
llm_prompt: Optional[str] = field(
default=None,
metadata={OPENINFERENCE_COLUMN_NAME: ":feature.text:llm_prompt"},
)
llm_messages: Optional[List[Tuple[str, str]]] = field(
default=None,
metadata={OPENINFERENCE_COLUMN_NAME: ":feature.[str]:llm_messages"},
)
response_text: Optional[str] = field(
default=None, metadata={OPENINFERENCE_COLUMN_NAME: ":prediction.text:response"}
)
node_ids: List[str] = field(
default_factory=list,
metadata={
OPENINFERENCE_COLUMN_NAME: ":feature.[str].retrieved_document_ids:prompt"
},
)
scores: List[float] = field(
default_factory=list,
metadata={
OPENINFERENCE_COLUMN_NAME: (
":feature.[float].retrieved_document_scores:prompt"
)
},
)
@dataclass
class NodeData:
"""Node data."""
id: str
node_text: Optional[str] = None
node_embedding: Optional[Embedding] = None
BaseDataType = TypeVar("BaseDataType", QueryData, NodeData)
def as_dataframe(data: Iterable[BaseDataType]) -> "DataFrame":
"""Converts a list of BaseDataType to a pandas dataframe.
Args:
data (Iterable[BaseDataType]): A list of BaseDataType.
Returns:
DataFrame: The converted pandas dataframe.
"""
pandas = _import_package("pandas")
as_dict_list = []
for datum in data:
as_dict = {
field.metadata.get(OPENINFERENCE_COLUMN_NAME, field.name): getattr(
datum, field.name
)
for field in fields(datum)
}
as_dict_list.append(as_dict)
return pandas.DataFrame(as_dict_list)
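# Illustrative example (not part of the upstream module): because each field of
# QueryData carries an OpenInference column name in its metadata,
#     as_dataframe([QueryData(query_text="What is OpenInference?")])
# yields a one-row DataFrame whose prompt column is named ":feature.text:prompt"
# rather than "query_text".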
@dataclass
class TraceData:
"""Trace data."""
query_data: QueryData = field(default_factory=QueryData)
node_datas: List[NodeData] = field(default_factory=list)
def _import_package(package_name: str) -> ModuleType:
"""Dynamically imports a package.
Args:
package_name (str): Name of the package to import.
Raises:
ImportError: If the package is not installed.
Returns:
ModuleType: The imported package.
"""
try:
package = importlib.import_module(package_name)
except ImportError:
raise ImportError(f"The {package_name} package must be installed.")
return package
class OpenInferenceCallbackHandler(BaseCallbackHandler):
"""Callback handler for storing generation data in OpenInference format.
OpenInference is an open standard for capturing and storing AI model
inferences. It enables production LLM app servers to seamlessly integrate
with LLM observability solutions such as Arize and Phoenix.
For more information on the specification, see
https://github.com/Arize-ai/open-inference-spec
"""
def __init__(
self,
callback: Optional[Callable[[List[QueryData], List[NodeData]], None]] = None,
) -> None:
"""Initializes the OpenInferenceCallbackHandler.
Args:
callback (Optional[Callable[[List[QueryData], List[NodeData]], None]], optional): A
callback function that will be called when a query trace is
completed, often used for logging or persisting query data.
"""
super().__init__(event_starts_to_ignore=[], event_ends_to_ignore=[])
self._callback = callback
self._trace_data = TraceData()
self._query_data_buffer: List[QueryData] = []
self._node_data_buffer: List[NodeData] = []
def start_trace(self, trace_id: Optional[str] = None) -> None:
if trace_id == "query" or trace_id == "chat":
self._trace_data = TraceData()
self._trace_data.query_data.timestamp = datetime.now().isoformat()
self._trace_data.query_data.id = _generate_random_id()
def end_trace(
self,
trace_id: Optional[str] = None,
trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
if trace_id == "query" or trace_id == "chat":
self._query_data_buffer.append(self._trace_data.query_data)
self._node_data_buffer.extend(self._trace_data.node_datas)
self._trace_data = TraceData()
if self._callback is not None:
self._callback(self._query_data_buffer, self._node_data_buffer)
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
parent_id: str = "",
**kwargs: Any,
) -> str:
if payload is not None:
if event_type is CBEventType.QUERY:
query_text = payload[EventPayload.QUERY_STR]
self._trace_data.query_data.query_text = query_text
elif event_type is CBEventType.LLM:
if prompt := payload.get(EventPayload.PROMPT, None):
self._trace_data.query_data.llm_prompt = prompt
if messages := payload.get(EventPayload.MESSAGES, None):
self._trace_data.query_data.llm_messages = [
(m.role.value, m.content) for m in messages
]
# For chat engines there is no query event, so the query text
# will be None; in this case we set the query text to the
# last message passed to the LLM
if self._trace_data.query_data.query_text is None:
self._trace_data.query_data.query_text = messages[-1].content
return event_id
def on_event_end(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
**kwargs: Any,
) -> None:
if payload is None:
return
if event_type is CBEventType.RETRIEVE:
for node_with_score in payload[EventPayload.NODES]:
node = node_with_score.node
score = node_with_score.score
self._trace_data.query_data.node_ids.append(node.hash)
self._trace_data.query_data.scores.append(score)
self._trace_data.node_datas.append(
NodeData(
id=node.hash,
node_text=node.text,
)
)
elif event_type is CBEventType.LLM:
if self._trace_data.query_data.response_text is None:
if response := payload.get(EventPayload.RESPONSE, None):
if isinstance(response, ChatResponse):
# If the response is of class ChatResponse the string
# representation has the format "<role>: <message>",
# but we want just the message
response_text = response.message.content
else:
response_text = str(response)
self._trace_data.query_data.response_text = response_text
elif completion := payload.get(EventPayload.COMPLETION, None):
self._trace_data.query_data.response_text = str(completion)
elif event_type is CBEventType.EMBEDDING:
self._trace_data.query_data.query_embedding = payload[
EventPayload.EMBEDDINGS
][0]
def flush_query_data_buffer(self) -> List[QueryData]:
"""Clears the query data buffer and returns the data.
Returns:
List[QueryData]: The query data.
"""
query_data_buffer = self._query_data_buffer
self._query_data_buffer = []
return query_data_buffer
def flush_node_data_buffer(self) -> List[NodeData]:
"""Clears the node data buffer and returns the data.
Returns:
List[NodeData]: The node data.
"""
node_data_buffer = self._node_data_buffer
self._node_data_buffer = []
return node_data_buffer
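# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module; the CSV path and
# the helper name are assumptions): one way to persist buffered traces whenever
# a query or chat trace completes.
def _example_persist_callback(
    query_data: List[QueryData], node_data: List[NodeData]
) -> None:
    """Hypothetical callback that dumps buffered query rows to disk."""
    as_dataframe(query_data).to_csv("openinference_queries.csv", index=False)
# The handler would then be constructed as
# OpenInferenceCallbackHandler(callback=_example_persist_callback) and
# registered with the application's CallbackManager so that traces flow
# through it.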
| 910790.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import importlib
import inspect
from .abstract_accelerator import DeepSpeedAccelerator
# During the setup stage torch may not be installed; passing when torch is
# absent allows the op-builder-related APIs to still be executed.
try:
import torch.npu
except ImportError:
pass
class NPU_Accelerator(DeepSpeedAccelerator):
def __init__(self):
super().__init__()
self._name = 'npu'
self._communication_backend_name = 'hccl'
self._compile_backend = "inductor"
# dict that holds class name <--> class type mapping i.e.
# 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
# this dict is filled lazily by _lazy_init_class_dict()
self.class_dict = None
def is_synchronized_device(self):
return False
def use_host_timers(self):
return self.is_synchronized_device()
def resolves_data_dependency(self):
return self.is_synchronized_device()
def handles_memory_backpressure(self):
return self.is_synchronized_device()
# Device APIs
def device_name(self, device_index=None):
if device_index is None:
return 'npu'
return 'npu:{}'.format(device_index)
def device(self, device_index=None):
return torch.npu.device(device_index)
def set_device(self, device_index):
torch.npu.set_device(device_index)
def current_device(self):
return torch.npu.current_device()
def current_device_name(self):
return 'npu:{}'.format(torch.npu.current_device())
def device_count(self):
return torch.npu.device_count()
def synchronize(self, device_index=None):
return torch.npu.synchronize(device_index)
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
if device_index is None:
return torch.npu.set_rng_state(new_state)
return torch.npu.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
if device_index is None:
return torch.npu.get_rng_state()
return torch.npu.get_rng_state(device_index)
def manual_seed(self, seed):
return torch.npu.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.npu.manual_seed_all(seed)
def initial_seed(self):
return torch.npu.initial_seed()
def default_generator(self, device_index):
return torch.npu.default_generators[device_index]
# Streams/Events
@property
def Stream(self):
return torch.npu.Stream
def stream(self, stream):
return torch.npu.stream(stream)
def current_stream(self, device_index=None):
return torch.npu.current_stream(device_index)
def default_stream(self, device_index=None):
return torch.npu.default_stream(device_index)
@property
def Event(self):
return torch.npu.Event
# Memory management
def empty_cache(self):
return torch.npu.empty_cache()
def memory_allocated(self, device_index=None):
return torch.npu.memory_allocated(device_index)
def max_memory_allocated(self, device_index=None):
return torch.npu.max_memory_allocated(device_index)
def reset_max_memory_allocated(self, device_index=None):
return torch.npu.reset_max_memory_allocated(device_index)
def memory_cached(self, device_index=None):
return torch.npu.memory_cached(device_index)
def max_memory_cached(self, device_index=None):
return torch.npu.max_memory_cached(device_index)
def reset_max_memory_cached(self, device_index=None):
return torch.npu.reset_max_memory_cached(device_index)
def memory_stats(self, device_index=None):
if hasattr(torch.npu, 'memory_stats'):
return torch.npu.memory_stats(device_index)
def reset_peak_memory_stats(self, device_index=None):
if hasattr(torch.npu, 'reset_peak_memory_stats'):
return torch.npu.reset_peak_memory_stats(device_index)
def memory_reserved(self, device_index=None):
if hasattr(torch.npu, 'memory_reserved'):
return torch.npu.memory_reserved(device_index)
def max_memory_reserved(self, device_index=None):
if hasattr(torch.npu, 'max_memory_reserved'):
return torch.npu.max_memory_reserved(device_index)
def total_memory(self, device_index=None):
return torch.npu.get_device_properties(device_index).total_memory
def available_memory(self, device_index=None):
return self.total_memory(device_index) - self.memory_allocated(device_index)
# Data types
def is_bf16_supported(self):
return torch.npu.is_bf16_supported()
def is_fp16_supported(self):
return True
def supported_dtypes(self):
return [torch.float, torch.half, torch.bfloat16]
# Misc
def amp(self):
if hasattr(torch.npu, 'amp'):
return torch.npu.amp
return None
def is_available(self):
return torch.npu.is_available()
def range_push(self, msg):
return
def range_pop(self):
return
def lazy_call(self, callback):
return torch.npu._lazy_call(callback)
def communication_backend_name(self):
return self._communication_backend_name
def is_triton_supported(self):
return False
# Graph operations
def create_graph(self):
return None
def capture_to_graph(self, graph, pool=None, stream=None):
from deepspeed.runtime.utils import noop_context
return noop_context()
def replay_graph(self, graph):
return
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.npu.BFloat16Tensor
@property
def ByteTensor(self):
return torch.npu.ByteTensor
@property
def DoubleTensor(self):
return torch.npu.DoubleTensor
@property
def FloatTensor(self):
return torch.npu.FloatTensor
@property
def HalfTensor(self):
return torch.npu.HalfTensor
@property
def IntTensor(self):
return torch.npu.IntTensor
@property
def LongTensor(self):
return torch.npu.LongTensor
def pin_memory(self, tensor, align_bytes=1):
return tensor.pin_memory()
def is_pinned(self, tensor):
return tensor.is_pinned()
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('npu:'):
return True
else:
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401 # type: ignore
return "op_builder.npu"
except ImportError:
return "deepspeed.ops.op_builder.npu"
def _lazy_init_class_dict(self):
if self.class_dict:
return
op_builder_module = importlib.import_module(self.op_builder_dir())
# get op builder class from op_builder/npu/__init__.py
self.class_dict = {}
for class_name, class_obj in inspect.getmembers(op_builder_module, inspect.isclass):
self.class_dict[class_name] = class_obj
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, class_name):
builder_class = self.get_op_builder(class_name)
return None if builder_class is None else builder_class()
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]
else:
return self.class_dict['NotImplementedBuilder'] if 'NotImplementedBuilder' in self.class_dict else None
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
def export_envs(self):
return ['ASCEND', 'HCCL', 'LD_LIBRARY', 'PATH']
def visible_devices_envs(self):
return ['ASCEND_RT_VISIBLE_DEVICES']
def set_visible_devices_envs(self, current_env, local_accelerator_ids):
for env in self.visible_devices_envs():
current_env[env] = ",".join(map(str, local_accelerator_ids))
def get_compile_backend(self):
return self._compile_backend
def set_compile_backend(self, backend):
supported_backends = torch._dynamo.list_backends(exclude_tags=())
if backend in supported_backends:
self._compile_backend = backend
else:
raise ValueError(
f"{backend} not supported by {self.device_name()}. Supported Backends are {supported_backends }")
| 181714.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator
import intel_extension_for_pytorch as ipex # noqa: F401 # type: ignore
import oneccl_bindings_for_pytorch # noqa: F401 # type: ignore
import functools
import importlib
import inspect
class XPU_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'xpu'
self._communication_backend_name = 'ccl'
self._compile_backend = "inductor"
self.aligned_tensors = []
self.class_dict = None
def is_synchronized_device(self):
return False
def use_host_timers(self):
# Workaround: XPU events will be consolidated in IPEX 2.5
if ipex.__version__ < '2.5':
return True
else:
return self.is_synchronized_device()
def resolves_data_dependency(self):
return self.is_synchronized_device()
def handles_memory_backpressure(self):
return self.is_synchronized_device()
# Device APIs
def device_name(self, device_index=None):
if device_index is None:
return 'xpu'
return 'xpu:{}'.format(device_index)
def device(self, device_index=None):
return torch.xpu.device(device_index)
def set_device(self, device_index):
torch.xpu.set_device(device_index)
def current_device(self):
return torch.xpu.current_device()
def current_device_name(self):
return 'xpu:{}'.format(torch.xpu.current_device())
def device_count(self):
return torch.xpu.device_count()
def synchronize(self, device_index=None):
return torch.xpu.synchronize(device_index)
# RNG APIs
def random(self):
return torch.xpu.random
def set_rng_state(self, new_state, device_index=None):
if device_index is None:
return torch.xpu.set_rng_state(new_state)
return torch.xpu.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
if device_index is None:
return torch.xpu.get_rng_state()
return torch.xpu.get_rng_state(device_index)
def manual_seed(self, seed):
return torch.xpu.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.xpu.manual_seed_all(seed)
def initial_seed(self):
return torch.xpu.initial_seed()
def default_generator(self, device_index):
return torch.xpu.default_generators[device_index]
# Streams/Events
@property
def Stream(self):
return torch.xpu.Stream
def stream(self, stream):
return torch.xpu.stream(stream)
def current_stream(self, device_index=None):
return torch.xpu.current_stream(device_index)
def default_stream(self, device_index=None):
# torch.xpu does not support the sync behavior of the default stream the
# way CUDA does; use current_stream as a workaround
# see https://pytorch.org/docs/stable/notes/cuda.html#cuda-streams
return torch.xpu.current_stream(device_index)
@property
def Event(self):
return torch.xpu.Event
# Memory management
def empty_cache(self):
return torch.xpu.empty_cache()
def memory_allocated(self, device_index=None):
return torch.xpu.memory_allocated(device_index)
def max_memory_allocated(self, device_index=None):
return torch.xpu.max_memory_allocated(device_index)
def reset_max_memory_allocated(self, device_index=None):
return torch.xpu.reset_max_memory_allocated(device_index)
def memory_cached(self, device_index=None):
return torch.xpu.memory_reserved(device_index)
def max_memory_cached(self, device_index=None):
return torch.xpu.max_memory_reserved(device_index)
def reset_max_memory_cached(self, device_index=None):
return torch.xpu.reset_max_memory_reserved(device_index)
def memory_stats(self, device_index=None):
return torch.xpu.memory_stats(device_index)
def reset_peak_memory_stats(self, device_index=None):
return torch.xpu.reset_peak_memory_stats(device_index)
def memory_reserved(self, device_index=None):
return torch.xpu.memory_reserved(device_index)
def max_memory_reserved(self, device_index=None):
return torch.xpu.max_memory_reserved(device_index)
def total_memory(self, device_index=None):
return torch.xpu.get_device_properties(device_index).total_memory
def available_memory(self, device_index=None):
return self.total_memory(device_index) - self.memory_allocated(device_index)
# Misc
def amp(self):
return torch.xpu.amp
def is_available(self):
return torch.xpu.is_available()
def range_push(self, msg):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_push(msg)
return
def range_pop(self):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_pop()
return
def lazy_call(self, callback):
if hasattr(torch.xpu, "_lazy_call"):
return torch.xpu._lazy_call(callback)
else:
return torch.xpu.lazy_init._lazy_call(callback)
def communication_backend_name(self):
return self._communication_backend_name
def is_triton_supported(self):
return False
# Graph operations
def create_graph(self):
return None
def capture_to_graph(self, graph, pool=None, stream=None):
from deepspeed.runtime.utils import noop_context
return noop_context()
def replay_graph(self, graph):
return
# Data types
def is_bf16_supported(self):
return True
def is_fp16_supported(self):
return True
def supported_dtypes(self):
return [torch.float, torch.half, torch.bfloat16]
# Tensor operations
@property
def BFloat16Tensor(self):
return functools.partial(torch.tensor, dtype=torch.bfloat16, device=self._name)
@property
def ByteTensor(self):
return functools.partial(torch.tensor, dtype=torch.uint8, device=self._name)
@property
def DoubleTensor(self):
return functools.partial(torch.tensor, dtype=torch.double, device=self._name)
@property
def FloatTensor(self):
return functools.partial(torch.tensor, dtype=torch.float, device=self._name)
@property
def HalfTensor(self):
return functools.partial(torch.tensor, dtype=torch.half, device=self._name)
@property
def IntTensor(self):
return functools.partial(torch.tensor, dtype=torch.int, device=self._name)
@property
def LongTensor(self):
return functools.partial(torch.tensor, dtype=torch.long, device=self._name)
def pin_memory(self, tensor, align_bytes=1):
if align_bytes == 1:
return tensor.pin_memory(device=self.current_device_name())
elif align_bytes == 0:
from deepspeed.ops.op_builder.xpu import AsyncIOBuilder
self.aio_handle = AsyncIOBuilder().load().aio_handle(128 * 1024, 8, False, False, False)
aligned_t = self.aio_handle.new_cpu_locked_tensor(tensor.numel(), tensor)
aligned_t = aligned_t[:tensor.numel()].copy_(tensor)
self.aligned_tensors.append([aligned_t.data_ptr(), aligned_t[-1].data_ptr()])
return aligned_t
def is_pinned(self, tensor):
if tensor.is_pinned(device=self.current_device_name()):
return True
else:
for begin, end in self.aligned_tensors:
if begin <= tensor.data_ptr() and tensor.data_ptr() <= end:
return True
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401 # type: ignore
return "op_builder.xpu"
except ImportError:
return "deepspeed.ops.op_builder.xpu"
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('xpu:'):
return True
else:
return False
def _lazy_init_class_dict(self):
if self.class_dict:
return
op_builder_module = importlib.import_module(self.op_builder_dir())
# get op builder class from op_builder/xpu/__init__.py
self.class_dict = {}
for class_name, class_obj in inspect.getmembers(op_builder_module, inspect.isclass):
self.class_dict[class_name] = class_obj
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, class_name):
builder_class = self.get_op_builder(class_name)
return builder_class()
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]
else:
return self.class_dict['NotImplementedBuilder']
def build_extension(self):
try:
from intel_extension_for_pytorch.xpu.cpp_extension import DpcppBuildExtension
except ImportError:
from intel_extension_for_pytorch.xpu.utils import DpcppBuildExtension
return DpcppBuildExtension
def export_envs(self):
return []
def visible_devices_envs(self):
return ['ZE_AFFINITY_MASK']
def set_visible_devices_envs(self, current_env, local_accelerator_ids):
for env in self.visible_devices_envs():
current_env[env] = ",".join(map(str, local_accelerator_ids))
def get_compile_backend(self):
return self._compile_backend
def set_compile_backend(self, backend):
supported_backends = torch._dynamo.list_backends(exclude_tags=())
if backend in supported_backends:
self._compile_backend = backend
else:
raise ValueError(
f"{backend} not supported by {self.device_name()}. Supported Backends are {supported_backends}")
| 190570.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import re
import torch
import types
from typing import List, Tuple, Union
from dataclasses import dataclass
from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_TENSOR, CAT_DIM, PARAM_N_SUB_PARAMS, SUB_PARAM_SHAPE)
@dataclass
class SubparamShape:
patterns: List[str]
shape: Tuple[Union[Tuple[int], int]]
partition_dim: int
def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size):
hp_mapping = self._hp_mapping
hp_mapping.optim_fragment = {}
hp_keys = []
for file in os.listdir(folder):
# We expect files named something like "exp_avg.pt", "exp_avg_sq.pt", "fp32.pt"
pattern = r'(.+)\.pt'
match = re.search(pattern, file)
if match:
hp_keys.append(match.group(1))
step = None
for key in hp_keys:
ckpt_file = os.path.join(folder, f"{key}.pt")
ckpt_dict = torch.load(ckpt_file)
if key == "step":
step = ckpt_dict
continue
full_hp_param = ckpt_dict[PARAM]
# need to deal with slices that were averaged.
# the opposite of averaging here becomes an exact copy of the first slice
# I thought of 2 ways:
# implementation a. find a way for a client to pass a dict with patterns
# if any(re.search(pattern, folder) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS):
# tp_rank = 0
# tp_world_size = 1
# the other approach is to assume that the saved data is correct and if full_hp_param.shape ==
# self.shape that means we automatically copy?
# implementation b.
# this version requires no additional data passed from the client
# if the shapes already match it must be slices that were averaged - so we just hack around those
if full_hp_param.shape == self.shape:
tp_rank = 0
tp_world_size = 1
# special case for word_embeddings weights which get padded differently depending on TP degree.
# the converter to universal currently strips the original padding completely so the saved
# weight is padding-free and we just need to add new padding depending on the target TP
# degree
is_vocab_tensor = ckpt_dict.get(VOCAB_TENSOR, False)
if is_vocab_tensor:
# In the absence of data passed from the user wrt new padded vocab specific to tp degree
# we can again derive that data by reverse engineering the target shapes like so:
padded_target_vocab_size = self.shape[0] * tp_world_size
assert padded_target_vocab_size >= full_hp_param.shape[0], \
f'Vocab tensor padded size {padded_target_vocab_size} < loaded universal size {full_hp_param.shape[0]}'
if padded_target_vocab_size > full_hp_param.shape[0]:
padding_size = padded_target_vocab_size - full_hp_param.shape[0]
full_hp_param = torch.nn.functional.pad(full_hp_param, (0, 0, 0, padding_size), "constant", 0)
full_param_numel = full_hp_param.numel()
tp_slice_numel = self.numel()
# if key == FP32_WEIGHT_KEY and 'word_embeddings.weight' in folder:
# print_rank_0(f'{full_hp_param[:10]=}', force=True)
assert full_param_numel == tp_world_size * tp_slice_numel, \
f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}'
# print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}")
# print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}")
sub_param_shape = ckpt_dict.get(SUB_PARAM_SHAPE, None)
# since the many-to-1 TP merge sometimes concatenates on dim=0 and other times on dim=1, we have to do exactly the same in reverse
# special case is when a single parameter is effectively a container for multiple sub parameters
# (more details at PARAM_N_SUB_PARAMS definition)
chunk_dim = ckpt_dict.get(CAT_DIM, 0)
n_sub_params = ckpt_dict.get(PARAM_N_SUB_PARAMS, 1)
if sub_param_shape:
partition_dim = sub_param_shape.partition_dim
sub_dim_sizes = sub_param_shape.shape[partition_dim]
if not isinstance(sub_dim_sizes, tuple):
sub_dim_sizes = (sub_dim_sizes, )
partition_shape = [sum(d) if isinstance(d, tuple) else d for d in sub_param_shape.shape]
full_hp_param = full_hp_param.view(partition_shape)
offset = 0
merged_chunks = []
for sub_dim_size in sub_dim_sizes:
sub_params_tp_slice = full_hp_param.narrow(partition_dim,
offset, sub_dim_size).chunk(tp_world_size,
dim=partition_dim)[tp_rank]
merged_chunks.append(sub_params_tp_slice)
offset += sub_dim_size
tp_hp_slice = torch.cat(merged_chunks, dim=partition_dim)
elif n_sub_params > 1:
sub_params = full_hp_param.chunk(n_sub_params, dim=chunk_dim)
sub_params_tp_slice = [p.chunk(tp_world_size, dim=chunk_dim)[tp_rank] for p in sub_params]
tp_hp_slice = torch.cat(sub_params_tp_slice, dim=chunk_dim)
else:
# this performs the opposite of cat when merging TP slices
tp_hp_slice = full_hp_param.chunk(tp_world_size, chunk_dim)[tp_rank]
tp_hp_slice = tp_hp_slice.flatten()
lp_frag_address = hp_mapping.lp_fragment_address
tp_hp_fragment = tp_hp_slice.narrow(0, lp_frag_address.start, lp_frag_address.numel)
# print(f"{key} SHAPE: {tp_hp_slice.shape=}")
# print(f"{key} SHAPE: {dst_tensor.shape=}")
# print(f"{key} SHAPE: {tp_hp_fragment.shape=}")
if key == FP32_WEIGHT_KEY:
dst_tensor = hp_mapping.get_hp_fragment()
assert dst_tensor.numel() == lp_frag_address.numel, \
f'Load checkpoint {key} dst numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}'
dst_tensor.data.copy_(tp_hp_fragment.data)
else:
assert tp_hp_fragment.numel() == lp_frag_address.numel, \
f'Load checkpoint {key} dst numel {tp_hp_fragment.numel()} != src numel {lp_frag_address.numel}'
hp_mapping.optim_fragment[key] = tp_hp_fragment.clone().detach()
return step
def enable_universal_checkpoint(param_list):
for param in param_list:
param.load_hp_checkpoint_state = types.MethodType(load_hp_checkpoint_state, param)
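# Illustrative usage sketch (derived from the code above): after
# enable_universal_checkpoint(param_list), each parameter carries a bound
# load_hp_checkpoint_state method, so a caller with a prepared _hp_mapping can
# restore its optimizer fragments from a universal-checkpoint folder holding
# files such as "fp32.pt", "exp_avg.pt" and "step.pt":
#     step = param.load_hp_checkpoint_state(ckpt_folder, tp_rank=0, tp_world_size=2)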
| 252141.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
from collections import Counter
from typing import Union, List, Callable, Tuple
import torch
import penman
from penman import Graph
from hanlp.common.dataset import TransformableDataset
from hanlp.components.amr.seq2seq.dataset.IO import read_raw_amr_data
from hanlp.components.amr.seq2seq.dataset.penman import role_is_reverted
from hanlp.components.amr.seq2seq.dataset.tokenization_bart import PENMANBartTokenizer
from phrasetree.tree import Tree
import json
from hanlp_common.constant import BOS, EOS, ROOT
from hanlp_common.io import load_pickle
class AMRDataset(TransformableDataset):
def __init__(self,
data: Union[str, List],
use_recategorization=False,
remove_wiki=False,
dereify=False,
transform: Union[Callable, List] = None,
cache=None,
generate_idx=None) -> None:
self.dereify = dereify
self.remove_wiki = remove_wiki
self.use_recategorization = use_recategorization
super().__init__(data, transform, cache, generate_idx)
def load_file(self, filepath: str):
graphs = read_raw_amr_data([filepath], self.use_recategorization, remove_wiki=self.remove_wiki,
dereify=self.dereify)
for g in graphs:
yield {'amr': g}
def get_roles(self):
roles = Counter()
for sample in self.data:
g: Graph = sample['amr']
for s, r, t in g.triples:
if role_is_reverted(r):
r = r[:-3]
roles[r] += 1
return roles
def get_frames(self):
frames = Counter()
for sample in self.data:
g: Graph = sample['amr']
for i in g.instances():
t = i.target
cells = t.split('-')
if len(cells) == 2 and len(cells[1]) == 2 and cells[1].isdigit():
frames[t] += 1
return frames
class AMRPickleDataset(AMRDataset):
def load_file(self, filepath: str):
items = torch.load(filepath)
for each in items:
each['amr'] = penman.decode(each['amr'])
yield each
def dfs_linearize_tokenize(sample: dict, tokenizer: PENMANBartTokenizer, remove_space=False, text_key='snt') -> dict:
amr = sample.get('amr', None)
if amr:
l, e = tokenizer.linearize(amr)
sample['graph_tokens'] = e['linearized_graphs']
sample['graph_token_ids'] = l
text = amr.metadata[text_key]
else:
text = sample['text']
if remove_space:
text = ''.join(text.split())
sample['text'] = text
sample['text_token_ids'] = tokenizer.encode(text)
return sample
def dfs_linearize_levi(sample: dict, tokenizer: PENMANBartTokenizer, remove_space=False) -> dict:
amr = sample.get('amr', None)
if amr:
l, e = tokenizer.linearize(amr)
sample['graph_tokens'] = e['linearized_graphs']
sample['graph_token_ids'] = l
tok = json.loads(amr.metadata['tok'])
dep = json.loads(amr.metadata['dep'])
levi = dep_to_levi(tok, dep)
sample['text'] = ' '.join(levi)
# ids = sum(tokenizer.batch_encode_plus([' ' + x for x in levi], add_special_tokens=False).input_ids, [])
ids = []
idx = 0
for t in levi:
if t in ('(', ')'):
ids.append(tokenizer.convert_tokens_to_ids(tokenizer.INIT + t))
else:
if idx % 2:
ids.extend(tokenizer.encode(t, add_special_tokens=False))
else:
ids.append(tokenizer.convert_tokens_to_ids(tokenizer.INIT + t))
idx += 1
sample['text_token_ids'] = [tokenizer.bos_token_id] + ids + [tokenizer.eos_token_id]
return sample
def dfs_linearize_rgcn(sample: dict, tokenizer: PENMANBartTokenizer) -> dict:
amr = sample.get('amr', None)
if amr:
l, e = tokenizer.linearize(amr)
sample['graph_tokens'] = e['linearized_graphs']
sample['graph_token_ids'] = l
tok = sample['tok']
sample['text'] = [tokenizer.cls_token] + [' ' + x for x in tok]
arc_scores = sample['dep']['scores']['arc_scores']
rel_scores = sample['dep']['scores']['rel_scores']
dep_graph = arc_scores[:, :, None] * rel_scores
root = torch.zeros((1,) + dep_graph.shape[1:])
sample['dep_graph'] = torch.cat([root, dep_graph], dim=0)
return sample
def dfs_linearize_constituency(sample: dict, tokenizer: PENMANBartTokenizer, remove_space=False) -> dict:
amr = sample.get('amr', None)
if amr:
l, e = tokenizer.linearize(amr)
sample['graph_tokens'] = e['linearized_graphs']
sample['graph_token_ids'] = l
tree = Tree.from_list(json.loads(sample['amr'].metadata['con_list']))
for each in tree.subtrees(lambda x: x.height() == 2):
if each[0] == '(':
each[0] = '<LBR>'
elif each[0] == ')':
each[0] = '<RBR>'
text = tree.pformat(margin=10e7)
tokens = []
buffer = []
for c in text:
if c == '(' or c == ')':
tokens.append(''.join(buffer))
tokens.append(c)
buffer.clear()
continue
buffer.append(c)
if buffer:
tokens.append(''.join(buffer))
tokens = [x.strip() for x in tokens]
tokens = [x for x in tokens if x]
restore_bracket = {'<LBR>': '(', '<RBR>': ')'}
tokens = [restore_bracket.get(x, x) for x in tokens]
ids = []
for each in tokens:
pairs = each.split(' ', 1)
if len(pairs) == 2:
con, token = pairs
ids.append(tokenizer.convert_tokens_to_ids(tokenizer.INIT + con))
ids.extend(tokenizer.encode(token, add_special_tokens=False))
else:
ids.append(tokenizer.convert_tokens_to_ids(tokenizer.INIT + each))
if remove_space:
text = ''.join(text.split())
sample['text'] = text
sample['text_token_ids'] = [tokenizer.bos_token_id] + ids + [tokenizer.eos_token_id]
return sample
def dfs_linearize_tokenize_with_linguistic_structures(sample: dict, tokenizer: PENMANBartTokenizer,
remove_space=False,
text_key='snt') -> dict:
amr = sample.get('amr', None)
if amr:
l, e = tokenizer.linearize(amr)
sample['graph_tokens'] = e['linearized_graphs']
sample['graph_token_ids'] = l
text = amr.metadata[text_key]
if remove_space:
text = ''.join(text.split())
sample['text'] = text
tok = json.loads(amr.metadata['tok'])
text_token_ids = tokenizer.batch_encode_plus(tok, add_special_tokens=False).input_ids
sample['text_token_ids'] = [tokenizer.bos_token_id] + sum(text_token_ids, []) + [tokenizer.eos_token_id]
pos = amr.metadata.get('pos', None)
if pos:
flat_pos = []
pos = json.loads(pos)
for subtokens, tag in zip(text_token_ids, pos):
flat_pos.extend([tag] * len(subtokens))
sample['pos'] = [BOS] + flat_pos + [EOS]
ner = amr.metadata.get('ner', None)
if ner is not None:
flat_ner = []
ner_spans = json.loads(ner)
ner = ['O'] * len(text_token_ids)
for form, tag, start, end in ner_spans:
ner[start:end] = [tag] * (end - start)
for subtokens, tag in zip(text_token_ids, ner):
flat_ner.extend([tag] * len(subtokens))
sample['ner'] = [BOS] + flat_ner + [EOS]
dep = amr.metadata.get('dep', None)
if dep:
token_to_1st_subtoken = [0]
num_subtokens = 1 # 1 for BOS
for subtokens in text_token_ids:
token_to_1st_subtoken.append(num_subtokens)
num_subtokens += len(subtokens)
flat_arc, flat_rel = [0], [BOS]
dep = json.loads(dep)
for subtokens, (arc, rel) in zip(text_token_ids, dep):
flat_arc.extend([token_to_1st_subtoken[arc]] * len(subtokens))
flat_rel.extend([rel] * len(subtokens))
sample['dep_arc'] = flat_arc + [0]
sample['dep_rel'] = flat_rel + [EOS]
return sample
def dep_to_levi(tok: List[str], dep: List[Tuple[int, str]]):
root = [i for i, x in enumerate(dep) if x[0] == 0][0]
seq = []
dfs(tok, dep, root, seq)
return seq
def dfs(tok: List[str], dep: List[Tuple[int, str]], s, seq):
seq.append(dep[s][1])
seq.append(tok[s])
children = [i for i, x in enumerate(dep) if x[0] == s + 1]
if children:
seq.append('(')
for child in children:
dfs(tok, dep, child, seq)
seq.append(')')
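# Illustrative worked example (derived from the code above; heads in `dep` are
# 1-based, with 0 marking the root):
#     tok = ['I', 'like', 'tea']
#     dep = [(2, 'nsubj'), (0, 'root'), (2, 'obj')]
#     dep_to_levi(tok, dep)
#     # -> ['root', 'like', '(', 'nsubj', 'I', 'obj', 'tea', ')']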
| 310532.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
import os
import tempfile
import time
import requests
from unittest import mock
from urllib.request import urlopen
import pytest
from requests.structures import CaseInsensitiveDict
from httpie.downloads import (
parse_content_range, filename_from_content_disposition, filename_from_url,
get_unique_filename, ContentRangeError, Downloader, PARTIAL_CONTENT
)
from .utils import http, MockEnvironment
class Response(requests.Response):
# noinspection PyDefaultArgument
def __init__(self, url, headers={}, status_code=200):
self.url = url
self.headers = CaseInsensitiveDict(headers)
self.status_code = status_code
class TestDownloadUtils:
def test_Content_Range_parsing(self):
parse = parse_content_range
assert parse('bytes 100-199/200', 100) == 200
assert parse('bytes 100-199/*', 100) == 200
# single byte
assert parse('bytes 100-100/*', 100) == 101
# missing
pytest.raises(ContentRangeError, parse, None, 100)
# syntax error
pytest.raises(ContentRangeError, parse, 'beers 100-199/*', 100)
# unexpected range
pytest.raises(ContentRangeError, parse, 'bytes 100-199/*', 99)
# invalid instance-length
pytest.raises(ContentRangeError, parse, 'bytes 100-199/199', 100)
# invalid byte-range-resp-spec
pytest.raises(ContentRangeError, parse, 'bytes 100-99/199', 100)
@pytest.mark.parametrize('header, expected_filename', [
('attachment; filename=hello-WORLD_123.txt', 'hello-WORLD_123.txt'),
('attachment; filename=".hello-WORLD_123.txt"', 'hello-WORLD_123.txt'),
('attachment; filename="white space.txt"', 'white space.txt'),
(r'attachment; filename="\"quotes\".txt"', '"quotes".txt'),
('attachment; filename=/etc/hosts', 'hosts'),
('attachment; filename=', None)
])
def test_Content_Disposition_parsing(self, header, expected_filename):
assert filename_from_content_disposition(header) == expected_filename
def test_filename_from_url(self):
assert 'foo.txt' == filename_from_url(
url='http://example.org/foo',
content_type='text/plain'
)
assert 'foo.html' == filename_from_url(
url='http://example.org/foo',
content_type='text/html; charset=UTF-8'
)
assert 'foo' == filename_from_url(
url='http://example.org/foo',
content_type=None
)
assert 'foo' == filename_from_url(
url='http://example.org/foo',
content_type='x-foo/bar'
)
@pytest.mark.parametrize(
'orig_name, unique_on_attempt, expected',
[
# Simple
('foo.bar', 0, 'foo.bar'),
('foo.bar', 1, 'foo.bar-1'),
('foo.bar', 10, 'foo.bar-10'),
# Trim
('A' * 20, 0, 'A' * 10),
('A' * 20, 1, 'A' * 8 + '-1'),
('A' * 20, 10, 'A' * 7 + '-10'),
# Trim before ext
('A' * 20 + '.txt', 0, 'A' * 6 + '.txt'),
('A' * 20 + '.txt', 1, 'A' * 4 + '.txt-1'),
# Trim at the end
('foo.' + 'A' * 20, 0, 'foo.' + 'A' * 6),
('foo.' + 'A' * 20, 1, 'foo.' + 'A' * 4 + '-1'),
('foo.' + 'A' * 20, 10, 'foo.' + 'A' * 3 + '-10'),
]
)
@mock.patch('httpie.downloads.get_filename_max_length')
def test_unique_filename(self, get_filename_max_length,
orig_name, unique_on_attempt,
expected):
def attempts(unique_on_attempt=0):
# noinspection PyUnresolvedReferences,PyUnusedLocal
def exists(filename):
if exists.attempt == unique_on_attempt:
return False
exists.attempt += 1
return True
exists.attempt = 0
return exists
get_filename_max_length.return_value = 10
actual = get_unique_filename(orig_name, attempts(unique_on_attempt))
assert expected == actual
class TestDownloads:
def test_actual_download(self, httpbin_both, httpbin):
robots_txt = '/robots.txt'
body = urlopen(httpbin + robots_txt).read().decode()
env = MockEnvironment(stdin_isatty=True, stdout_isatty=False, show_displays=True)
r = http('--download', httpbin_both.url + robots_txt, env=env)
assert 'Downloading' in r.stderr
assert body == r
def test_download_with_Content_Length(self, mock_env, httpbin_both):
with open(os.devnull, 'w') as devnull:
downloader = Downloader(mock_env, output_file=devnull)
downloader.start(
initial_url='/',
final_response=Response(
url=httpbin_both.url + '/',
headers={'Content-Length': 10}
)
)
time.sleep(1.1)
downloader.chunk_downloaded(b'12345')
time.sleep(1.1)
downloader.chunk_downloaded(b'12345')
downloader.finish()
assert not downloader.interrupted
def test_download_no_Content_Length(self, mock_env, httpbin_both):
with open(os.devnull, 'w') as devnull:
downloader = Downloader(mock_env, output_file=devnull)
downloader.start(
final_response=Response(url=httpbin_both.url + '/'),
initial_url='/'
)
time.sleep(1.1)
downloader.chunk_downloaded(b'12345')
downloader.finish()
assert not downloader.interrupted
def test_download_output_from_content_disposition(self, mock_env, httpbin_both):
with tempfile.TemporaryDirectory() as tmp_dirname:
orig_cwd = os.getcwd()
os.chdir(tmp_dirname)
try:
assert not os.path.isfile('filename.bin')
downloader = Downloader(mock_env)
downloader.start(
final_response=Response(
url=httpbin_both.url + '/',
headers={
'Content-Length': 5,
'Content-Disposition': 'attachment; filename="filename.bin"',
}
),
initial_url='/'
)
downloader.chunk_downloaded(b'12345')
downloader.finish()
downloader.failed() # Stop the reporter
assert not downloader.interrupted
# TODO: Auto-close the file in that case?
downloader._output_file.close()
assert os.path.isfile('filename.bin')
finally:
os.chdir(orig_cwd)
def test_download_interrupted(self, mock_env, httpbin_both):
with open(os.devnull, 'w') as devnull:
downloader = Downloader(mock_env, output_file=devnull)
downloader.start(
final_response=Response(
url=httpbin_both.url + '/',
headers={'Content-Length': 5}
),
initial_url='/'
)
downloader.chunk_downloaded(b'1234')
downloader.finish()
assert downloader.interrupted
def test_download_resumed(self, mock_env, httpbin_both):
with tempfile.TemporaryDirectory() as tmp_dirname:
file = os.path.join(tmp_dirname, 'file.bin')
with open(file, 'a'):
pass
with open(file, 'a+b') as output_file:
# Start and interrupt the transfer after 3 bytes written
downloader = Downloader(mock_env, output_file=output_file)
downloader.start(
final_response=Response(
url=httpbin_both.url + '/',
headers={'Content-Length': 5}
),
initial_url='/'
)
downloader.chunk_downloaded(b'123')
downloader.finish()
downloader.failed()
assert downloader.interrupted
# Write bytes
with open(file, 'wb') as fh:
fh.write(b'123')
with open(file, 'a+b') as output_file:
# Resume the transfer
downloader = Downloader(mock_env, output_file=output_file, resume=True)
# Ensure `pre_request()` is working as expected too
headers = {}
downloader.pre_request(headers)
assert headers['Accept-Encoding'] == 'identity'
assert headers['Range'] == 'bytes=3-'
downloader.start(
final_response=Response(
url=httpbin_both.url + '/',
headers={'Content-Length': 5, 'Content-Range': 'bytes 3-4/5'},
status_code=PARTIAL_CONTENT
),
initial_url='/'
)
downloader.chunk_downloaded(b'45')
downloader.finish()
def test_download_with_redirect_original_url_used_for_filename(self, httpbin):
# Redirect from `/redirect/1` to `/get`.
expected_filename = '1.json'
orig_cwd = os.getcwd()
with tempfile.TemporaryDirectory() as tmp_dirname:
os.chdir(tmp_dirname)
try:
assert os.listdir('.') == []
http('--download', httpbin + '/redirect/1')
assert os.listdir('.') == [expected_filename]
finally:
os.chdir(orig_cwd)
| 087698.py | [
"CWE-939: Improper Authorization in Handler for Custom URL Scheme"
] |
#!/usr/bin/env python3
"""Extract Mel spectrograms with teacher forcing."""
import argparse
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from TTS.config import load_config
from TTS.tts.datasets import TTSDataset, load_tts_samples
from TTS.tts.models import setup_model
from TTS.tts.utils.speakers import SpeakerManager
from TTS.tts.utils.text.tokenizer import TTSTokenizer
from TTS.utils.audio import AudioProcessor
from TTS.utils.audio.numpy_transforms import quantize
from TTS.utils.generic_utils import count_parameters
use_cuda = torch.cuda.is_available()
def setup_loader(ap, r, verbose=False):
tokenizer, _ = TTSTokenizer.init_from_config(c)
dataset = TTSDataset(
outputs_per_step=r,
compute_linear_spec=False,
samples=meta_data,
tokenizer=tokenizer,
ap=ap,
batch_group_size=0,
min_text_len=c.min_text_len,
max_text_len=c.max_text_len,
min_audio_len=c.min_audio_len,
max_audio_len=c.max_audio_len,
phoneme_cache_path=c.phoneme_cache_path,
precompute_num_workers=0,
use_noise_augment=False,
verbose=verbose,
speaker_id_mapping=speaker_manager.name_to_id if c.use_speaker_embedding else None,
d_vector_mapping=speaker_manager.embeddings if c.use_d_vector_file else None,
)
if c.use_phonemes and c.compute_input_seq_cache:
# precompute phonemes to have a better estimate of sequence lengths.
dataset.compute_input_seq(c.num_loader_workers)
dataset.preprocess_samples()
loader = DataLoader(
dataset,
batch_size=c.batch_size,
shuffle=False,
collate_fn=dataset.collate_fn,
drop_last=False,
sampler=None,
num_workers=c.num_loader_workers,
pin_memory=False,
)
return loader
def set_filename(wav_path, out_path):
wav_file = os.path.basename(wav_path)
file_name = wav_file.split(".")[0]
os.makedirs(os.path.join(out_path, "quant"), exist_ok=True)
os.makedirs(os.path.join(out_path, "mel"), exist_ok=True)
os.makedirs(os.path.join(out_path, "wav_gl"), exist_ok=True)
os.makedirs(os.path.join(out_path, "wav"), exist_ok=True)
wavq_path = os.path.join(out_path, "quant", file_name)
mel_path = os.path.join(out_path, "mel", file_name)
wav_gl_path = os.path.join(out_path, "wav_gl", file_name + ".wav")
wav_path = os.path.join(out_path, "wav", file_name + ".wav")
return file_name, wavq_path, mel_path, wav_gl_path, wav_path
def format_data(data):
# setup input data
text_input = data["token_id"]
text_lengths = data["token_id_lengths"]
mel_input = data["mel"]
mel_lengths = data["mel_lengths"]
item_idx = data["item_idxs"]
d_vectors = data["d_vectors"]
speaker_ids = data["speaker_ids"]
attn_mask = data["attns"]
avg_text_length = torch.mean(text_lengths.float())
avg_spec_length = torch.mean(mel_lengths.float())
# dispatch data to GPU
if use_cuda:
text_input = text_input.cuda(non_blocking=True)
text_lengths = text_lengths.cuda(non_blocking=True)
mel_input = mel_input.cuda(non_blocking=True)
mel_lengths = mel_lengths.cuda(non_blocking=True)
if speaker_ids is not None:
speaker_ids = speaker_ids.cuda(non_blocking=True)
if d_vectors is not None:
d_vectors = d_vectors.cuda(non_blocking=True)
if attn_mask is not None:
attn_mask = attn_mask.cuda(non_blocking=True)
return (
text_input,
text_lengths,
mel_input,
mel_lengths,
speaker_ids,
d_vectors,
avg_text_length,
avg_spec_length,
attn_mask,
item_idx,
)
@torch.no_grad()
def inference(
model_name,
model,
ap,
text_input,
text_lengths,
mel_input,
mel_lengths,
speaker_ids=None,
d_vectors=None,
):
if model_name == "glow_tts":
speaker_c = None
if speaker_ids is not None:
speaker_c = speaker_ids
elif d_vectors is not None:
speaker_c = d_vectors
outputs = model.inference_with_MAS(
text_input,
text_lengths,
mel_input,
mel_lengths,
aux_input={"d_vectors": speaker_c, "speaker_ids": speaker_ids},
)
model_output = outputs["model_outputs"]
model_output = model_output.detach().cpu().numpy()
elif "tacotron" in model_name:
aux_input = {"speaker_ids": speaker_ids, "d_vectors": d_vectors}
outputs = model(text_input, text_lengths, mel_input, mel_lengths, aux_input)
postnet_outputs = outputs["model_outputs"]
# normalize tacotron output
if model_name == "tacotron":
mel_specs = []
postnet_outputs = postnet_outputs.data.cpu().numpy()
for b in range(postnet_outputs.shape[0]):
postnet_output = postnet_outputs[b]
mel_specs.append(torch.FloatTensor(ap.out_linear_to_mel(postnet_output.T).T))
model_output = torch.stack(mel_specs).cpu().numpy()
elif model_name == "tacotron2":
model_output = postnet_outputs.detach().cpu().numpy()
return model_output
def extract_spectrograms(
data_loader, model, ap, output_path, quantize_bits=0, save_audio=False, debug=False, metada_name="metada.txt"
):
model.eval()
export_metadata = []
for _, data in tqdm(enumerate(data_loader), total=len(data_loader)):
# format data
(
text_input,
text_lengths,
mel_input,
mel_lengths,
speaker_ids,
d_vectors,
_,
_,
_,
item_idx,
) = format_data(data)
model_output = inference(
c.model.lower(),
model,
ap,
text_input,
text_lengths,
mel_input,
mel_lengths,
speaker_ids,
d_vectors,
)
for idx in range(text_input.shape[0]):
wav_file_path = item_idx[idx]
wav = ap.load_wav(wav_file_path)
_, wavq_path, mel_path, wav_gl_path, wav_path = set_filename(wav_file_path, output_path)
# quantize and save wav
if quantize_bits > 0:
wavq = quantize(wav, quantize_bits)
np.save(wavq_path, wavq)
# save TTS mel
mel = model_output[idx]
mel_length = mel_lengths[idx]
mel = mel[:mel_length, :].T
np.save(mel_path, mel)
export_metadata.append([wav_file_path, mel_path])
if save_audio:
ap.save_wav(wav, wav_path)
if debug:
print("Audio for debug saved at:", wav_gl_path)
wav = ap.inv_melspectrogram(mel)
ap.save_wav(wav, wav_gl_path)
with open(os.path.join(output_path, metada_name), "w", encoding="utf-8") as f:
for data in export_metadata:
f.write(f"{data[0]}|{data[1]+'.npy'}\n")
def main(args): # pylint: disable=redefined-outer-name
# pylint: disable=global-variable-undefined
global meta_data, speaker_manager
# Audio processor
ap = AudioProcessor(**c.audio)
# load data instances
meta_data_train, meta_data_eval = load_tts_samples(
c.datasets, eval_split=args.eval, eval_split_max_size=c.eval_split_max_size, eval_split_size=c.eval_split_size
)
# use eval and training partitions
meta_data = meta_data_train + meta_data_eval
# init speaker manager
if c.use_speaker_embedding:
speaker_manager = SpeakerManager(data_items=meta_data)
elif c.use_d_vector_file:
speaker_manager = SpeakerManager(d_vectors_file_path=c.d_vector_file)
else:
speaker_manager = None
# setup model
model = setup_model(c)
# restore model
model.load_checkpoint(c, args.checkpoint_path, eval=True)
if use_cuda:
model.cuda()
num_params = count_parameters(model)
print("\n > Model has {} parameters".format(num_params), flush=True)
# set r
r = 1 if c.model.lower() == "glow_tts" else model.decoder.r
own_loader = setup_loader(ap, r, verbose=True)
extract_spectrograms(
own_loader,
model,
ap,
args.output_path,
quantize_bits=args.quantize_bits,
save_audio=args.save_audio,
debug=args.debug,
metada_name="metada.txt",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config_path", type=str, help="Path to config file for training.", required=True)
parser.add_argument("--checkpoint_path", type=str, help="Model file to be restored.", required=True)
parser.add_argument("--output_path", type=str, help="Path to save mel specs", required=True)
parser.add_argument("--debug", default=False, action="store_true", help="Save audio files for debug")
parser.add_argument("--save_audio", default=False, action="store_true", help="Save audio files")
parser.add_argument("--quantize_bits", type=int, default=0, help="Save quantized audio files if non-zero")
parser.add_argument("--eval", type=bool, help="compute eval.", default=True)
args = parser.parse_args()
c = load_config(args.config_path)
c.audio.trim_silence = False
main(args)
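# Illustrative CLI usage (the script name and paths are placeholders):
#   python extract_tts_spectrograms.py \
#       --config_path config.json \
#       --checkpoint_path best_model.pth \
#       --output_path specs/ \
#       --quantize_bits 10 --save_audio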
| 614629.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import traceback
import torch
from torch.utils.data import DataLoader
from trainer.io import copy_model_files, save_best_model, save_checkpoint
from trainer.torch import NoamLR
from trainer.trainer_utils import get_optimizer
from TTS.encoder.dataset import EncoderDataset
from TTS.encoder.utils.generic_utils import setup_encoder_model
from TTS.encoder.utils.training import init_training
from TTS.encoder.utils.visual import plot_embeddings
from TTS.tts.datasets import load_tts_samples
from TTS.utils.audio import AudioProcessor
from TTS.utils.generic_utils import count_parameters, remove_experiment_folder
from TTS.utils.samplers import PerfectBatchSampler
from TTS.utils.training import check_update
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.manual_seed(54321)
use_cuda = torch.cuda.is_available()
num_gpus = torch.cuda.device_count()
print(" > Using CUDA: ", use_cuda)
print(" > Number of GPUs: ", num_gpus)
def setup_loader(ap: AudioProcessor, is_val: bool = False, verbose: bool = False):
num_utter_per_class = c.num_utter_per_class if not is_val else c.eval_num_utter_per_class
num_classes_in_batch = c.num_classes_in_batch if not is_val else c.eval_num_classes_in_batch
dataset = EncoderDataset(
c,
ap,
meta_data_eval if is_val else meta_data_train,
voice_len=c.voice_len,
num_utter_per_class=num_utter_per_class,
num_classes_in_batch=num_classes_in_batch,
verbose=verbose,
augmentation_config=c.audio_augmentation if not is_val else None,
use_torch_spec=c.model_params.get("use_torch_spec", False),
)
# get classes list
classes = dataset.get_class_list()
sampler = PerfectBatchSampler(
dataset.items,
classes,
batch_size=num_classes_in_batch * num_utter_per_class, # total batch size
num_classes_in_batch=num_classes_in_batch,
num_gpus=1,
shuffle=not is_val,
drop_last=True,
)
if len(classes) < num_classes_in_batch:
if is_val:
raise RuntimeError(
f"config.eval_num_classes_in_batch ({num_classes_in_batch}) need to be <= {len(classes)} (Number total of Classes in the Eval dataset) !"
)
raise RuntimeError(
f"config.num_classes_in_batch ({num_classes_in_batch}) need to be <= {len(classes)} (Number total of Classes in the Train dataset) !"
)
# set the classes to avoid getting a wrong class_id when the numbers of training and eval classes are not equal
if is_val:
dataset.set_classes(train_classes)
loader = DataLoader(
dataset,
num_workers=c.num_loader_workers,
batch_sampler=sampler,
collate_fn=dataset.collate_fn,
)
return loader, classes, dataset.get_map_classid_to_classname()
def evaluation(model, criterion, data_loader, global_step):
eval_loss = 0
for _, data in enumerate(data_loader):
with torch.no_grad():
# setup input data
inputs, labels = data
# group samples of each class in the batch; the perfect sampler produces [3,2,1,3,2,1] but we need [3,3,2,2,1,1]
labels = torch.transpose(
labels.view(c.eval_num_utter_per_class, c.eval_num_classes_in_batch), 0, 1
).reshape(labels.shape)
inputs = torch.transpose(
inputs.view(c.eval_num_utter_per_class, c.eval_num_classes_in_batch, -1), 0, 1
).reshape(inputs.shape)
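# e.g. with eval_num_classes_in_batch=3 and eval_num_utter_per_class=2 the
# sampler yields the class order [A, B, C, A, B, C]; view(2, 3) followed by
# transpose(0, 1) regroups it to [A, A, B, B, C, C] before reshaping back to
# the original flat shape.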
# dispatch data to GPU
if use_cuda:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
# forward pass model
outputs = model(inputs)
# loss computation
loss = criterion(
outputs.view(c.eval_num_classes_in_batch, outputs.shape[0] // c.eval_num_classes_in_batch, -1), labels
)
eval_loss += loss.item()
eval_avg_loss = eval_loss / len(data_loader)
# save stats
dashboard_logger.eval_stats(global_step, {"loss": eval_avg_loss})
# plot the last batch in the evaluation
figures = {
"UMAP Plot": plot_embeddings(outputs.detach().cpu().numpy(), c.num_classes_in_batch),
}
dashboard_logger.eval_figures(global_step, figures)
return eval_avg_loss
def train(model, optimizer, scheduler, criterion, data_loader, eval_data_loader, global_step):
model.train()
best_loss = {"train_loss": None, "eval_loss": float("inf")}
avg_loader_time = 0
end_time = time.time()
for epoch in range(c.epochs):
tot_loss = 0
epoch_time = 0
for _, data in enumerate(data_loader):
start_time = time.time()
# setup input data
inputs, labels = data
# group samples of each class in the batch; the perfect sampler produces [3,2,1,3,2,1] but we need [3,3,2,2,1,1]
labels = torch.transpose(labels.view(c.num_utter_per_class, c.num_classes_in_batch), 0, 1).reshape(
labels.shape
)
inputs = torch.transpose(inputs.view(c.num_utter_per_class, c.num_classes_in_batch, -1), 0, 1).reshape(
inputs.shape
)
# ToDo: move it to a unit test
# labels_converted = torch.transpose(labels.view(c.num_utter_per_class, c.num_classes_in_batch), 0, 1).reshape(labels.shape)
# inputs_converted = torch.transpose(inputs.view(c.num_utter_per_class, c.num_classes_in_batch, -1), 0, 1).reshape(inputs.shape)
# idx = 0
# for j in range(0, c.num_classes_in_batch, 1):
# for i in range(j, len(labels), c.num_classes_in_batch):
# if not torch.all(labels[i].eq(labels_converted[idx])) or not torch.all(inputs[i].eq(inputs_converted[idx])):
# print("Invalid")
# print(labels)
# exit()
# idx += 1
# labels = labels_converted
# inputs = inputs_converted
loader_time = time.time() - end_time
global_step += 1
# setup lr
if c.lr_decay:
scheduler.step()
optimizer.zero_grad()
# dispatch data to GPU
if use_cuda:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
# forward pass model
outputs = model(inputs)
# loss computation
loss = criterion(
outputs.view(c.num_classes_in_batch, outputs.shape[0] // c.num_classes_in_batch, -1), labels
)
loss.backward()
grad_norm, _ = check_update(model, c.grad_clip)
optimizer.step()
step_time = time.time() - start_time
epoch_time += step_time
            # accumulate the total epoch loss
tot_loss += loss.item()
            # averaged loader time (running average weighted by the number of loader workers)
num_loader_workers = c.num_loader_workers if c.num_loader_workers > 0 else 1
avg_loader_time = (
1 / num_loader_workers * loader_time + (num_loader_workers - 1) / num_loader_workers * avg_loader_time
if avg_loader_time != 0
else loader_time
)
current_lr = optimizer.param_groups[0]["lr"]
if global_step % c.steps_plot_stats == 0:
# Plot Training Epoch Stats
train_stats = {
"loss": loss.item(),
"lr": current_lr,
"grad_norm": grad_norm,
"step_time": step_time,
"avg_loader_time": avg_loader_time,
}
dashboard_logger.train_epoch_stats(global_step, train_stats)
figures = {
"UMAP Plot": plot_embeddings(outputs.detach().cpu().numpy(), c.num_classes_in_batch),
}
dashboard_logger.train_figures(global_step, figures)
if global_step % c.print_step == 0:
print(
" | > Step:{} Loss:{:.5f} GradNorm:{:.5f} "
"StepTime:{:.2f} LoaderTime:{:.2f} AvGLoaderTime:{:.2f} LR:{:.6f}".format(
global_step, loss.item(), grad_norm, step_time, loader_time, avg_loader_time, current_lr
),
flush=True,
)
if global_step % c.save_step == 0:
# save model
save_checkpoint(
c, model, optimizer, None, global_step, epoch, OUT_PATH, criterion=criterion.state_dict()
)
end_time = time.time()
print("")
print(
">>> Epoch:{} AvgLoss: {:.5f} GradNorm:{:.5f} "
"EpochTime:{:.2f} AvGLoaderTime:{:.2f} ".format(
epoch, tot_loss / len(data_loader), grad_norm, epoch_time, avg_loader_time
),
flush=True,
)
# evaluation
if c.run_eval:
model.eval()
eval_loss = evaluation(model, criterion, eval_data_loader, global_step)
print("\n\n")
print("--> EVAL PERFORMANCE")
print(
" | > Epoch:{} AvgLoss: {:.5f} ".format(epoch, eval_loss),
flush=True,
)
# save the best checkpoint
best_loss = save_best_model(
{"train_loss": None, "eval_loss": eval_loss},
best_loss,
c,
model,
optimizer,
None,
global_step,
epoch,
OUT_PATH,
criterion=criterion.state_dict(),
)
model.train()
return best_loss, global_step
def main(args): # pylint: disable=redefined-outer-name
# pylint: disable=global-variable-undefined
global meta_data_train
global meta_data_eval
global train_classes
ap = AudioProcessor(**c.audio)
model = setup_encoder_model(c)
optimizer = get_optimizer(c.optimizer, c.optimizer_params, c.lr, model)
# pylint: disable=redefined-outer-name
meta_data_train, meta_data_eval = load_tts_samples(c.datasets, eval_split=True)
train_data_loader, train_classes, map_classid_to_classname = setup_loader(ap, is_val=False, verbose=True)
if c.run_eval:
eval_data_loader, _, _ = setup_loader(ap, is_val=True, verbose=True)
else:
eval_data_loader = None
num_classes = len(train_classes)
criterion = model.get_criterion(c, num_classes)
if c.loss == "softmaxproto" and c.model != "speaker_encoder":
c.map_classid_to_classname = map_classid_to_classname
copy_model_files(c, OUT_PATH, new_fields={})
if args.restore_path:
criterion, args.restore_step = model.load_checkpoint(
c, args.restore_path, eval=False, use_cuda=use_cuda, criterion=criterion
)
print(" > Model restored from step %d" % args.restore_step, flush=True)
else:
args.restore_step = 0
if c.lr_decay:
scheduler = NoamLR(optimizer, warmup_steps=c.warmup_steps, last_epoch=args.restore_step - 1)
else:
scheduler = None
num_params = count_parameters(model)
print("\n > Model has {} parameters".format(num_params), flush=True)
if use_cuda:
model = model.cuda()
criterion.cuda()
global_step = args.restore_step
_, global_step = train(model, optimizer, scheduler, criterion, train_data_loader, eval_data_loader, global_step)
if __name__ == "__main__":
args, c, OUT_PATH, AUDIO_PATH, c_logger, dashboard_logger = init_training()
try:
main(args)
except KeyboardInterrupt:
remove_experiment_folder(OUT_PATH)
try:
sys.exit(0)
except SystemExit:
os._exit(0) # pylint: disable=protected-access
except Exception: # pylint: disable=broad-except
remove_experiment_folder(OUT_PATH)
traceback.print_exc()
sys.exit(1)
| 588795.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
import os
from glob import glob
from typing import Dict, List
import librosa
import numpy as np
import torch
import torchaudio
from scipy.io.wavfile import read
from TTS.utils.audio.torch_transforms import TorchSTFT
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
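    # scale integer PCM samples to [-1, 1] floats according to the sample dtype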
if data.dtype == np.int32:
norm_fix = 2**31
elif data.dtype == np.int16:
norm_fix = 2**15
elif data.dtype == np.float16 or data.dtype == np.float32:
norm_fix = 1.0
else:
raise NotImplementedError(f"Provided data dtype not supported: {data.dtype}")
return (torch.FloatTensor(data.astype(np.float32)) / norm_fix, sampling_rate)
def check_audio(audio, audiopath: str):
# Check some assumptions about audio range. This should be automatically fixed in load_wav_to_torch, but might not be in some edge cases, where we should squawk.
# '2' is arbitrarily chosen since it seems like audio will often "overdrive" the [-1,1] bounds.
if torch.any(audio > 2) or not torch.any(audio < 0):
print(f"Error with {audiopath}. Max={audio.max()} min={audio.min()}")
audio.clip_(-1, 1)
def read_audio_file(audiopath: str):
if audiopath[-4:] == ".wav":
audio, lsr = load_wav_to_torch(audiopath)
elif audiopath[-4:] == ".mp3":
audio, lsr = librosa.load(audiopath, sr=None)
audio = torch.FloatTensor(audio)
else:
assert False, f"Unsupported audio format provided: {audiopath[-4:]}"
# Remove any channel data.
if len(audio.shape) > 1:
if audio.shape[0] < 5:
audio = audio[0]
else:
assert audio.shape[1] < 5
audio = audio[:, 0]
return audio, lsr
def load_required_audio(audiopath: str):
audio, lsr = read_audio_file(audiopath)
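    # produce copies resampled at both sampling rates used downstream (22050 Hz and 24000 Hz)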
audios = [torchaudio.functional.resample(audio, lsr, sampling_rate) for sampling_rate in (22050, 24000)]
for audio in audios:
check_audio(audio, audiopath)
return [audio.unsqueeze(0) for audio in audios]
def load_audio(audiopath, sampling_rate):
audio, lsr = read_audio_file(audiopath)
if lsr != sampling_rate:
audio = torchaudio.functional.resample(audio, lsr, sampling_rate)
check_audio(audio, audiopath)
return audio.unsqueeze(0)
TACOTRON_MEL_MAX = 2.3143386840820312
TACOTRON_MEL_MIN = -11.512925148010254
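# normalize_tacotron_mel / denormalize_tacotron_mel map mel values between the Tacotron range above and [-1, 1]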
def denormalize_tacotron_mel(norm_mel):
return ((norm_mel + 1) / 2) * (TACOTRON_MEL_MAX - TACOTRON_MEL_MIN) + TACOTRON_MEL_MIN
def normalize_tacotron_mel(mel):
return 2 * ((mel - TACOTRON_MEL_MIN) / (TACOTRON_MEL_MAX - TACOTRON_MEL_MIN)) - 1
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
def get_voices(extra_voice_dirs: List[str] = []):
dirs = extra_voice_dirs
voices: Dict[str, List[str]] = {}
for d in dirs:
subs = os.listdir(d)
for sub in subs:
subj = os.path.join(d, sub)
if os.path.isdir(subj):
voices[sub] = list(glob(f"{subj}/*.wav")) + list(glob(f"{subj}/*.mp3")) + list(glob(f"{subj}/*.pth"))
return voices
def load_voice(voice: str, extra_voice_dirs: List[str] = []):
if voice == "random":
return None, None
voices = get_voices(extra_voice_dirs)
paths = voices[voice]
if len(paths) == 1 and paths[0].endswith(".pth"):
return None, torch.load(paths[0])
else:
conds = []
for cond_path in paths:
c = load_required_audio(cond_path)
conds.append(c)
return conds, None
def load_voices(voices: List[str], extra_voice_dirs: List[str] = []):
latents = []
clips = []
for voice in voices:
if voice == "random":
if len(voices) > 1:
print("Cannot combine a random voice with a non-random voice. Just using a random voice.")
return None, None
clip, latent = load_voice(voice, extra_voice_dirs)
if latent is None:
assert (
len(latents) == 0
), "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
clips.extend(clip)
elif clip is None:
assert (
len(clips) == 0
), "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
latents.append(latent)
if len(latents) == 0:
return clips, None
else:
latents_0 = torch.stack([l[0] for l in latents], dim=0).mean(dim=0)
latents_1 = torch.stack([l[1] for l in latents], dim=0).mean(dim=0)
latents = (latents_0, latents_1)
return None, latents
def wav_to_univnet_mel(wav, do_normalization=False, device="cuda"):
stft = TorchSTFT(
n_fft=1024,
hop_length=256,
win_length=1024,
use_mel=True,
n_mels=100,
sample_rate=24000,
mel_fmin=0,
mel_fmax=12000,
)
stft = stft.to(device)
mel = stft(wav)
mel = dynamic_range_compression(mel)
if do_normalization:
mel = normalize_tacotron_mel(mel)
return mel
| 779518.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
import re
import importlib
import inspect
from enum import Enum
from dataclasses import dataclass
from typing import Optional, List, Tuple, Set, Dict
_SPHINX_AUTOSUMMARY_HEADER = ".. autosummary::"
_SPHINX_AUTOCLASS_HEADER = ".. autoclass::"
# This is a special character used in autosummary to render only the api shortname, for
# example ~module.api_name will render only api_name
_SPHINX_AUTODOC_SHORTNAME = "~"
class AnnotationType(Enum):
PUBLIC_API = "PublicAPI"
DEVELOPER_API = "DeveloperAPI"
DEPRECATED = "Deprecated"
UNKNOWN = "Unknown"
class CodeType(Enum):
CLASS = "Class"
FUNCTION = "Function"
@dataclass
class API:
name: str
annotation_type: AnnotationType
code_type: CodeType
@staticmethod
def from_autosummary(doc: str, current_module: Optional[str] = None) -> List["API"]:
"""
Parse API from the following autosummary sphinx block.
.. autosummary::
:option_01
:option_02
api_01
api_02
"""
apis = []
lines = doc.splitlines()
if not lines:
return apis
if lines[0].strip() != _SPHINX_AUTOSUMMARY_HEADER:
return apis
for line in lines:
if line == _SPHINX_AUTOSUMMARY_HEADER:
continue
if line.strip().startswith(":"):
# option lines
continue
if line.strip().startswith(".."):
# comment lines
continue
if not line.strip():
# empty lines
continue
if not re.match(r"\s", line):
                # end of the autosummary block: a non-empty line that does not start
                # with whitespace (\s) terminates the listing
break
attribute = line.strip().removeprefix(_SPHINX_AUTODOC_SHORTNAME)
api_name = f"{current_module}.{attribute}" if current_module else attribute
apis.append(
API(
name=api_name,
annotation_type=AnnotationType.PUBLIC_API,
code_type=CodeType.FUNCTION,
)
)
return apis
@staticmethod
def from_autoclass(
doc: str, current_module: Optional[str] = None
) -> Optional["API"]:
"""
Parse API from the following autoclass sphinx block.
.. autoclass:: api_01
"""
doc = doc.strip()
if not doc.startswith(_SPHINX_AUTOCLASS_HEADER):
return None
cls = (
doc[len(_SPHINX_AUTOCLASS_HEADER) :]
.strip()
.removeprefix(_SPHINX_AUTODOC_SHORTNAME)
)
api_name = f"{current_module}.{cls}" if current_module else cls
return API(
name=api_name,
annotation_type=AnnotationType.PUBLIC_API,
code_type=CodeType.CLASS,
)
def get_canonical_name(self) -> str:
"""
        Some APIs have aliases declared in an __init__.py file (see ray/data/__init__.py
        for example). This method converts an alias to its full name so that our
        analysis is performed on a consistent set of canonical names.
"""
tokens = self.name.split(".")
        # resolve the name to a python object by walking the dotted path token by token
attribute = importlib.import_module(tokens[0])
for token in tokens[1:]:
if not hasattr(attribute, token):
# return as it is if the name seems malformed
return self.name
attribute = getattr(attribute, token)
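        # only classes and functions reliably expose __module__ and __qualname__; anything else keeps the declared name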
if inspect.isclass(attribute) or inspect.isfunction(attribute):
return f"{attribute.__module__}.{attribute.__qualname__}"
return self.name
def _is_private_name(self) -> bool:
"""
Check if this API has a private name. Private names are those that start with
underscores.
"""
name_has_underscore = self.name.split(".")[-1].startswith("_")
is_internal = "._internal." in self.name
return name_has_underscore or is_internal
def is_public(self) -> bool:
"""
Check if this API is public. Public APIs are those that are annotated as public
        and do not have private names.
"""
return (
self.annotation_type == AnnotationType.PUBLIC_API
and not self._is_private_name()
)
def is_deprecated(self) -> bool:
"""
Check if this API is deprecated. Deprecated APIs are those that are annotated as
deprecated.
"""
return self.annotation_type == AnnotationType.DEPRECATED
@staticmethod
def split_good_and_bad_apis(
api_in_codes: Dict[str, "API"], api_in_docs: Set[str], white_list_apis: Set[str]
    ) -> Tuple[List[str], List[str]]:
"""
Given the APIs in the codebase and the documentation, split the APIs into good
and bad APIs. Good APIs are those that are public and documented, bad APIs are
those that are public but NOT documented.
"""
good_apis = []
bad_apis = []
for name, api in api_in_codes.items():
if not api.is_public():
continue
if name in white_list_apis:
continue
if name in api_in_docs:
good_apis.append(name)
else:
bad_apis.append(name)
return good_apis, bad_apis
| 210196.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
import os.path
import subprocess
import platform
from PyQt5 import QtGui
from PyQt5.QtCore import QSize, pyqtSignal, Qt, QThread
from PyQt5.QtGui import QPainter, QFont, QColor, QPixmap, QPolygon, QFontMetrics
from PyQt5.QtWidgets import QWidget, QLabel, QHBoxLayout, QSizePolicy, QVBoxLayout, QSpacerItem, \
QScrollArea
from app.components.scroll_bar import ScrollBar
class MessageType:
Text = 1
Image = 3
class TextMessage(QLabel):
heightSingal = pyqtSignal(int)
def __init__(self, text, is_send=False, parent=None):
if isinstance(text, bytes):
text = text.decode('utf-8')
super(TextMessage, self).__init__(text, parent)
font = QFont('微软雅黑', 12)
self.setFont(font)
self.setWordWrap(True)
self.setMaximumWidth(800)
# self.setMinimumWidth(100)
self.setMinimumHeight(45)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
if is_send:
self.setAlignment(Qt.AlignCenter | Qt.AlignRight)
self.setStyleSheet(
'''
background-color:#b2e281;
border-radius:10px;
padding:10px;
'''
)
else:
self.setStyleSheet(
'''
background-color:white;
border-radius:10px;
padding:10px;
'''
)
font_metrics = QFontMetrics(font)
rect = font_metrics.boundingRect(text)
# rect = font_metrics
self.setMaximumWidth(rect.width() + 40)
def paintEvent(self, a0: QtGui.QPaintEvent) -> None:
super(TextMessage, self).paintEvent(a0)
class Triangle(QLabel):
def __init__(self, Type, is_send=False, position=(0, 0), parent=None):
"""
        @param Type: message type (MessageType.Text or MessageType.Image)
        @param is_send: True if the bubble belongs to a sent message
        @param position: (x, y) offset of the bubble tail
        @param parent: parent widget
"""
super().__init__(parent)
self.Type = Type
self.is_send = is_send
self.position = position
def paintEvent(self, a0: QtGui.QPaintEvent) -> None:
super(Triangle, self).paintEvent(a0)
if self.Type == MessageType.Text:
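            # draw the small bubble "tail" pointing toward the avatar: green for sent messages, white for received ones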
self.setFixedSize(6, 45)
painter = QPainter(self)
triangle = QPolygon()
x, y = self.position
if self.is_send:
painter.setPen(QColor('#b2e281'))
painter.setBrush(QColor('#b2e281'))
triangle.setPoints(0, 20+y, 0, 34+y, 6, 27+y)
else:
painter.setPen(QColor('white'))
painter.setBrush(QColor('white'))
triangle.setPoints(0, 27+y, 6, 20+y, 6, 34+y)
painter.drawPolygon(triangle)
class Notice(QLabel):
def __init__(self, text, type_=3, parent=None):
super().__init__(text, parent)
self.type_ = type_
self.setFont(QFont('微软雅黑', 10))
self.setWordWrap(True)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.setAlignment(Qt.AlignCenter)
class Avatar(QLabel):
def __init__(self, avatar, parent=None):
super().__init__(parent)
if isinstance(avatar, str):
self.setPixmap(QPixmap(avatar).scaled(45, 45))
self.image_path = avatar
elif isinstance(avatar, QPixmap):
self.setPixmap(avatar.scaled(45, 45))
self.setFixedSize(QSize(45, 45))
def open_image_viewer(file_path):
system_platform = platform.system()
if system_platform == "Darwin": # macOS
subprocess.run(["open", file_path])
elif system_platform == "Windows":
subprocess.run(["start", " ", file_path], shell=True)
elif system_platform == "Linux":
subprocess.run(["xdg-open", file_path])
else:
print("Unsupported platform")
class OpenImageThread(QThread):
def __init__(self, image_path):
super().__init__()
self.image_path = image_path
def run(self) -> None:
if os.path.exists(self.image_path):
open_image_viewer(self.image_path)
class ImageMessage(QLabel):
def __init__(self, image, is_send, image_link='', max_width=480, max_height=240, parent=None):
"""
        param:image  path to an image file or a QPixmap object
        param:image_link=''  file path that is opened when the image is clicked
"""
super().__init__(parent)
self.image = QLabel(self)
self.max_width = max_width
self.max_height = max_height
# self.setFixedSize(self.max_width,self.max_height)
self.setMaximumWidth(self.max_width)
self.setMaximumHeight(self.max_height)
self.setCursor(Qt.PointingHandCursor)
if isinstance(image, str):
pixmap = QPixmap(image)
self.image_path = image
elif isinstance(image, QPixmap):
pixmap = image
self.set_image(pixmap)
if image_link:
self.image_path = image_link
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
if is_send:
self.setAlignment(Qt.AlignCenter | Qt.AlignRight)
# self.setScaledContents(True)
def set_image(self, pixmap):
        # compute the adjusted size, capped at the maximum width/height
adjusted_width = min(pixmap.width(), self.max_width)
adjusted_height = min(pixmap.height(), self.max_height)
self.setPixmap(pixmap.scaled(adjusted_width, adjusted_height, Qt.KeepAspectRatio))
        # resize the QLabel to fit the image without exceeding the maximum width/height
# self.setFixedSize(adjusted_width, adjusted_height)
def mousePressEvent(self, event):
        if event.buttons() == Qt.LeftButton:  # left mouse button pressed
            print('Opening image', self.image_path)
self.open_image_thread = OpenImageThread(self.image_path)
self.open_image_thread.start()
class BubbleMessage(QWidget):
def __init__(self, str_content, avatar, Type, is_send=False, display_name=None, parent=None):
super().__init__(parent)
self.isSend = is_send
# self.set
self.setStyleSheet(
'''
border:none;
'''
)
layout = QHBoxLayout()
layout.setSpacing(0)
layout.setContentsMargins(0, 5, 5, 5)
# self.resize(QSize(200, 50))
self.avatar = Avatar(avatar)
triangle = Triangle(Type, is_send, (0, 0))
if Type == MessageType.Text:
self.message = TextMessage(str_content, is_send)
# self.message.setMaximumWidth(int(self.width() * 0.6))
elif Type == MessageType.Image:
self.message = ImageMessage(str_content, is_send)
else:
            raise ValueError("Unknown message type")
if display_name:
triangle = Triangle(Type, is_send, (0, 10))
label_name = QLabel(display_name, self)
label_name.setFont(QFont('微软雅黑', 10))
if is_send:
label_name.setAlignment(Qt.AlignRight)
vlayout = QVBoxLayout()
vlayout.setSpacing(0)
if is_send:
vlayout.addWidget(label_name, 0, Qt.AlignTop | Qt.AlignRight)
vlayout.addWidget(self.message, 0, Qt.AlignTop | Qt.AlignRight)
else:
vlayout.addWidget(label_name)
vlayout.addWidget(self.message)
self.spacerItem = QSpacerItem(45 + 6, 45, QSizePolicy.Expanding, QSizePolicy.Minimum)
if is_send:
layout.addItem(self.spacerItem)
if display_name:
layout.addLayout(vlayout, 1)
else:
layout.addWidget(self.message, 1)
layout.addWidget(triangle, 0, Qt.AlignTop | Qt.AlignLeft)
layout.addWidget(self.avatar, 0, Qt.AlignTop | Qt.AlignLeft)
else:
layout.addWidget(self.avatar, 0, Qt.AlignTop | Qt.AlignRight)
layout.addWidget(triangle, 0, Qt.AlignTop | Qt.AlignRight)
if display_name:
layout.addLayout(vlayout, 1)
else:
layout.addWidget(self.message, 1)
layout.addItem(self.spacerItem)
self.setLayout(layout)
class ScrollAreaContent(QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.adjustSize()
class ScrollArea(QScrollArea):
def __init__(self, parent=None):
super().__init__(parent)
self.setWidgetResizable(True)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setStyleSheet(
'''
border:none;
'''
)
class ChatWidget(QWidget):
def __init__(self):
super().__init__()
self.resize(500, 200)
layout = QVBoxLayout()
layout.setSpacing(0)
self.adjustSize()
        # create the scroll area
self.scrollArea = ScrollArea(self)
scrollBar = ScrollBar()
self.scrollArea.setVerticalScrollBar(scrollBar)
        # create the content widget that lives inside the scroll area
self.scrollAreaWidgetContents = ScrollAreaContent(self.scrollArea)
self.scrollAreaWidgetContents.setMinimumSize(50, 100)
        # set the content widget created above as the scroll area's widget
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
layout.addWidget(self.scrollArea)
self.layout0 = QVBoxLayout()
self.layout0.setSpacing(0)
self.scrollAreaWidgetContents.setLayout(self.layout0)
self.setLayout(layout)
def add_message_item(self, bubble_message, index=1):
if index:
self.layout0.addWidget(bubble_message)
else:
self.layout0.insertWidget(0, bubble_message)
# self.set_scroll_bar_last()
def set_scroll_bar_last(self):
self.scrollArea.verticalScrollBar().setValue(
self.scrollArea.verticalScrollBar().maximum()
)
def set_scroll_bar_value(self, val):
self.verticalScrollBar().setValue(val)
def verticalScrollBar(self):
return self.scrollArea.verticalScrollBar()
def update(self) -> None:
super().update()
self.scrollAreaWidgetContents.adjustSize()
self.scrollArea.update()
# self.scrollArea.repaint()
# self.verticalScrollBar().setMaximum(self.scrollAreaWidgetContents.height())
| 494330.py | [
"CWE-78: Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')"
] |
# -*- coding: utf-8 -*-
"""
emoji.py
!!! Notice:
    Emoji stickers do not belong to any individual and may carry copyright risk; you only have the right to view them, not to own them.
    In addition, requests to the Tencent API may put load on Tencent's servers.
    Therefore nobody may modify this file, directly or indirectly, in any way; violators bear the consequences.
"""
import os
import re
import traceback
import xml.etree.ElementTree as ET
import sqlite3
import threading
from PyQt5.QtGui import QPixmap
import requests
from app.log import log, logger
lock = threading.Lock()
db_path = "./app/Database/Msg/Emotion.db"
root_path = "./data/emoji/"
if not os.path.exists("./data"):
os.mkdir("./data")
if not os.path.exists(root_path):
os.mkdir(root_path)
@log
def get_image_format(header):
    # magic numbers for known image formats
image_formats = {
b"\xFF\xD8\xFF": "jpeg",
b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png",
b"\x47\x49\x46": "gif",
b"\x42\x4D": "bmp",
        # add magic numbers for other image formats here
}
    # determine the image format of the file
for magic_number, image_format in image_formats.items():
if header.startswith(magic_number):
return image_format
    # return None if the format cannot be recognized
return None
@log
def parser_xml(xml_string):
assert type(xml_string) == str
# Parse the XML string
try:
root = ET.fromstring(xml_string)
except:
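        # the payload sometimes wraps the XML in extra text; extract the <msg>...</msg> fragment and escape bare '&' before re-parsing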
res = re.search('<msg>.*</msg>', xml_string)
if res:
xml_string = res.group()
            root = ET.fromstring(xml_string.replace("&", "&amp;"))
emoji = root.find("./emoji")
# Accessing attributes of the 'emoji' element
fromusername = emoji.get("fromusername")
tousername = emoji.get("tousername")
md5 = emoji.get("md5")
cdnurl = emoji.get("cdnurl")
encrypturl = emoji.get("encrypturl")
thumburl = emoji.get("thumburl")
externurl = emoji.get("externurl")
androidmd5 = emoji.get("androidmd5")
width = emoji.get("width")
height = emoji.get("height")
return {
"width": width,
"height": height,
"cdnurl": cdnurl,
"thumburl": thumburl if thumburl else cdnurl,
"md5": (md5 if md5 else androidmd5).lower(),
}
def singleton(cls):
_instance = {}
def inner():
if cls not in _instance:
_instance[cls] = cls()
return _instance[cls]
return inner
# make sure there is only ever one Emotion instance, so the sqlite connection is opened at most once
@singleton
class Emotion:
def __init__(self):
self.DB = None
self.cursor: sqlite3.Cursor = None
self.open_flag = False
self.init_database()
def init_database(self):
if not self.open_flag:
if os.path.exists(db_path):
self.DB = sqlite3.connect(db_path, check_same_thread=False)
                # create the database cursor
self.cursor = self.DB.cursor()
self.open_flag = True
if lock.locked():
lock.release()
def get_emoji_url(self, md5: str, thumb: bool) -> str | bytes:
"""供下载用,返回可能是url可能是bytes"""
if thumb:
sql = """
select
case
when thumburl is NULL or thumburl = '' then cdnurl
else thumburl
end as selected_url
from CustomEmotion
where md5 = ?
"""
else:
sql = """
select CDNUrl
from CustomEmotion
where md5 = ?
"""
try:
lock.acquire(True)
self.cursor.execute(sql, [md5])
return self.cursor.fetchone()[0]
except:
md5 = md5.upper()
sql = f"""
select {"Thumb" if thumb else "Data"}
from EmotionItem
where md5 = ?
"""
self.cursor.execute(sql, [md5])
res = self.cursor.fetchone()
return res[0] if res else ""
finally:
lock.release()
def get_emoji_URL(self, md5: str, thumb: bool):
"""只管url,另外的不管"""
if thumb:
sql = """
select
case
when thumburl is NULL or thumburl = '' then cdnurl
else thumburl
end as selected_url
from CustomEmotion
where md5 = ?
"""
else:
sql = """
select CDNUrl
from CustomEmotion
where md5 = ?
"""
try:
lock.acquire(True)
self.cursor.execute(sql, [md5])
return self.cursor.fetchone()[0]
except:
return ""
finally:
lock.release()
def close(self):
if self.open_flag:
try:
lock.acquire(True)
self.open_flag = False
self.DB.close()
finally:
lock.release()
def __del__(self):
self.close()
@log
def download(url, output_dir, name, thumb=False):
resp = requests.get(url)
byte = resp.content
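    # sniff the image type from the leading magic bytes so the file gets a matching extension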
image_format = get_image_format(byte[:8])
if image_format:
if thumb:
output_path = os.path.join(output_dir, "th_" + name + "." + image_format)
else:
output_path = os.path.join(output_dir, name + "." + image_format)
else:
output_path = os.path.join(output_dir, name)
with open(output_path, "wb") as f:
f.write(resp.content)
return output_path
def get_most_emoji(messages):
dic = {}
for msg in messages:
str_content = msg[7]
emoji_info = parser_xml(str_content)
if emoji_info is None:
continue
md5 = emoji_info["md5"]
if not md5:
continue
try:
dic[md5][0] += 1
except:
dic[md5] = [1, emoji_info]
md5_nums = [(num[0], key, num[1]) for key, num in dic.items()]
md5_nums.sort(key=lambda x: x[0], reverse=True)
if not md5_nums:
return "", 0
md5 = md5_nums[0][1]
num = md5_nums[0][0]
emoji_info = md5_nums[0][2]
url = emoji_info["cdnurl"]
if not url or url == "":
url = Emotion().get_emoji_url(md5, False)
return url, num
def get_emoji(xml_string, thumb=True, output_path=root_path) -> str:
"""供下载用"""
try:
emoji_info = parser_xml(xml_string)
md5 = emoji_info["md5"]
image_format = [".png", ".gif", ".jpeg"]
for f in image_format:
prefix = "th_" if thumb else ""
file_path = os.path.join(output_path, prefix + md5 + f)
if os.path.exists(file_path):
return file_path
url = emoji_info["thumburl"] if thumb else emoji_info["cdnurl"]
if not url or url == "":
url = Emotion().get_emoji_url(md5, thumb)
if type(url) == str and url != "":
print("下载表情包ing:", url)
emoji_path = download(url, output_path, md5, thumb)
return emoji_path
elif type(url) == bytes:
image_format = get_image_format(url[:8])
if image_format:
if thumb:
output_path = os.path.join(
output_path, "th_" + md5 + "." + image_format
)
else:
output_path = os.path.join(output_path, md5 + "." + image_format)
else:
output_path = os.path.join(output_path, md5)
with open(output_path, "wb") as f:
f.write(url)
print("表情包数据库加载", output_path)
return output_path
else:
print("!!!未知表情包数据,信息:", xml_string, emoji_info, url)
output_path = os.path.join(output_path, "404.png")
if not os.path.exists(output_path):
QPixmap(":/icons/icons/404.png").save(output_path)
return output_path
except:
logger.error(traceback.format_exc())
output_path = os.path.join(output_path, "404.png")
if not os.path.exists(output_path):
QPixmap(":/icons/icons/404.png").save(output_path)
return output_path
def get_emoji_path(xml_string, thumb=True, output_path=root_path) -> str:
try:
emoji_info = parser_xml(xml_string)
md5 = emoji_info["md5"]
image_format = [".png", ".gif", ".jpeg"]
for f in image_format:
prefix = "th_" if thumb else ""
file_path = os.path.join(output_path, prefix + md5 + f)
return file_path
except:
logger.error(traceback.format_exc())
output_path = os.path.join(output_path, "404.png")
return output_path
def get_emoji_url(xml_string, thumb=True) -> str:
"""不管下载,只返回url"""
try:
emoji_info = parser_xml(xml_string)
md5 = emoji_info["md5"]
url = emoji_info["thumburl" if thumb else "cdnurl"]
if not url or url == "":
url = Emotion().get_emoji_URL(md5=md5, thumb=thumb)
return url
except:
logger.error(traceback.format_exc())
output_path = os.path.join("./emoji/404.png")
return output_path
if __name__ == "__main__":
# xml_string = '<msg><emoji fromusername = "wxid_0o18ef858vnu22" tousername = "wxid_27hqbq7vx5hf22" type="2" idbuffer="media:0_0" md5="71ce49ed3ce9e57e43e07f802983bf45" len = "352588" productid="com.tencent.xin.emoticon.person.stiker_1678703862259eb01f2ef4a313" androidmd5="71ce49ed3ce9e57e43e07f802983bf45" androidlen="352588" s60v3md5 = "71ce49ed3ce9e57e43e07f802983bf45" s60v3len="352588" s60v5md5 = "71ce49ed3ce9e57e43e07f802983bf45" s60v5len="352588" cdnurl = "http://wxapp.tc.qq.com/262/20304/stodownload?m=71ce49ed3ce9e57e43e07f802983bf45&filekey=30350201010421301f020201060402535a041071ce49ed3ce9e57e43e07f802983bf45020305614c040d00000004627466730000000132&hy=SZ&storeid=263ffa00b000720d03274c5820000010600004f50535a1ca0c950b64287022&bizid=1023" designerid = "" thumburl = "http://mmbiz.qpic.cn/mmemoticon/ajNVdqHZLLDSKTMRgM8agiadpFhKz9IJ3cD5Ra2sTROibOaShdt3D4z6PfE92WkjQY/0" encrypturl = "http://wxapp.tc.qq.com/262/20304/stodownload?m=cbaae1d847aac6389652b65562bacaa2&filekey=30350201010421301f020201060402535a0410cbaae1d847aac6389652b65562bacaa20203056150040d00000004627466730000000132&hy=SZ&storeid=263ffa00b0008d8223274c5820000010600004f50535a17b82910b64764739&bizid=1023" aeskey= "7051ab2a34442dec63434832463f45ce" externurl = "http://wxapp.tc.qq.com/262/20304/stodownload?m=960f68693454dfa64b9966ca5d70dbd3&filekey=30340201010420301e020201060402535a0410960f68693454dfa64b9966ca5d70dbd3020221a0040d00000004627466730000000132&hy=SZ&storeid=26423dbe3000793a8720e40de0000010600004f50535a1d40c950b71be0a50&bizid=1023" externmd5 = "41895664fc5a77878e2155fc96209a19" width= "240" height= "240" tpurl= "" tpauthkey= "" attachedtext= "" attachedtextcolor= "" lensid= "" emojiattr= "" linkid= "" desc= "ChEKB2RlZmF1bHQSBuWNlee6rw==" ></emoji> </msg>'
# res1 = parser_xml(xml_string)
# print(res1, res1['md5'])
# download(res1['cdnurl'], "./data/emoji/", res1['md5'])
# download(res1['thumburl'], "./data/emoji/", res1['md5'], True)
# print(Emotion().get_emoji_url("144714f65c98844128ac3a1042445d9a", True))
# print(Emotion().get_emoji_url("144714f65c98844128ac3a1042445d9a", False))
print(parser_xml(""))
# print(get_emoji(xml_string, True))
# print(get_emoji(xml_string, False))
# http://vweixinf.tc.qq.com/110/20403/stodownload?m=3a4d439aba02dce4834b2c54e9f15597&filekey=3043020101042f302d02016e0402534804203361346434333961626130326463653438333462326335346539663135353937020213f0040d00000004627466730000000131&hy=SH&storeid=323032313037323030373236313130303039653236646365316535316534383236386234306230303030303036653033303034666233&ef=3&bizid=1022
| 095146.py | [
"CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')"
] |
#!/usr/bin/env python
"""
Copyright (c) 2006-2024 sqlmap developers (https://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import glob
import os
import re
import shutil
import subprocess
import time
import zipfile
from lib.core.common import dataToStdout
from lib.core.common import extractRegexResult
from lib.core.common import getLatestRevision
from lib.core.common import getSafeExString
from lib.core.common import openFile
from lib.core.common import pollProcess
from lib.core.common import readInput
from lib.core.convert import getText
from lib.core.data import conf
from lib.core.data import logger
from lib.core.data import paths
from lib.core.revision import getRevisionNumber
from lib.core.settings import GIT_REPOSITORY
from lib.core.settings import IS_WIN
from lib.core.settings import VERSION
from lib.core.settings import TYPE
from lib.core.settings import ZIPBALL_PAGE
from thirdparty.six.moves import urllib as _urllib
def update():
if not conf.updateAll:
return
success = False
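    # the update strategy depends on how sqlmap was installed: pip package, plain (non-git) directory via zipball, or git checkout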
if TYPE == "pip":
infoMsg = "updating sqlmap to the latest stable version from the "
infoMsg += "PyPI repository"
logger.info(infoMsg)
debugMsg = "sqlmap will try to update itself using 'pip' command"
logger.debug(debugMsg)
dataToStdout("\r[%s] [INFO] update in progress" % time.strftime("%X"))
output = ""
try:
process = subprocess.Popen("pip install -U sqlmap", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=paths.SQLMAP_ROOT_PATH)
pollProcess(process, True)
output, _ = process.communicate()
success = not process.returncode
except Exception as ex:
success = False
output = getSafeExString(ex)
finally:
output = getText(output)
if success:
logger.info("%s the latest revision '%s'" % ("already at" if "already up-to-date" in output else "updated to", extractRegexResult(r"\binstalled sqlmap-(?P<result>\d+\.\d+\.\d+)", output) or extractRegexResult(r"\((?P<result>\d+\.\d+\.\d+)\)", output)))
else:
logger.error("update could not be completed ('%s')" % re.sub(r"[^a-z0-9:/\\]+", " ", output).strip())
elif not os.path.exists(os.path.join(paths.SQLMAP_ROOT_PATH, ".git")):
warnMsg = "not a git repository. It is recommended to clone the 'sqlmapproject/sqlmap' repository "
warnMsg += "from GitHub (e.g. 'git clone --depth 1 %s sqlmap')" % GIT_REPOSITORY
logger.warning(warnMsg)
if VERSION == getLatestRevision():
logger.info("already at the latest revision '%s'" % (getRevisionNumber() or VERSION))
return
message = "do you want to try to fetch the latest 'zipball' from repository and extract it (experimental) ? [y/N]"
if readInput(message, default='N', boolean=True):
directory = os.path.abspath(paths.SQLMAP_ROOT_PATH)
try:
open(os.path.join(directory, "sqlmap.py"), "w+b")
except Exception as ex:
errMsg = "unable to update content of directory '%s' ('%s')" % (directory, getSafeExString(ex))
logger.error(errMsg)
else:
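                # remember sqlmap.py's original permission bits so they can be restored after the new files are extracted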
attrs = os.stat(os.path.join(directory, "sqlmap.py")).st_mode
for wildcard in ('*', ".*"):
for _ in glob.glob(os.path.join(directory, wildcard)):
try:
if os.path.isdir(_):
shutil.rmtree(_)
else:
os.remove(_)
except:
pass
if glob.glob(os.path.join(directory, '*')):
errMsg = "unable to clear the content of directory '%s'" % directory
logger.error(errMsg)
else:
try:
archive = _urllib.request.urlretrieve(ZIPBALL_PAGE)[0]
with zipfile.ZipFile(archive) as f:
for info in f.infolist():
info.filename = re.sub(r"\Asqlmap[^/]+", "", info.filename)
if info.filename:
f.extract(info, directory)
filepath = os.path.join(paths.SQLMAP_ROOT_PATH, "lib", "core", "settings.py")
if os.path.isfile(filepath):
with openFile(filepath, "rb") as f:
version = re.search(r"(?m)^VERSION\s*=\s*['\"]([^'\"]+)", f.read()).group(1)
logger.info("updated to the latest version '%s#dev'" % version)
success = True
except Exception as ex:
logger.error("update could not be completed ('%s')" % getSafeExString(ex))
else:
if not success:
logger.error("update could not be completed")
else:
try:
os.chmod(os.path.join(directory, "sqlmap.py"), attrs)
except OSError:
logger.warning("could not set the file attributes of '%s'" % os.path.join(directory, "sqlmap.py"))
else:
infoMsg = "updating sqlmap to the latest development revision from the "
infoMsg += "GitHub repository"
logger.info(infoMsg)
debugMsg = "sqlmap will try to update itself using 'git' command"
logger.debug(debugMsg)
dataToStdout("\r[%s] [INFO] update in progress" % time.strftime("%X"))
output = ""
try:
process = subprocess.Popen("git checkout . && git pull %s HEAD" % GIT_REPOSITORY, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=paths.SQLMAP_ROOT_PATH)
pollProcess(process, True)
output, _ = process.communicate()
success = not process.returncode
except Exception as ex:
success = False
output = getSafeExString(ex)
finally:
output = getText(output)
if success:
logger.info("%s the latest revision '%s'" % ("already at" if "Already" in output else "updated to", getRevisionNumber()))
else:
if "Not a git repository" in output:
errMsg = "not a valid git repository. Please checkout the 'sqlmapproject/sqlmap' repository "
errMsg += "from GitHub (e.g. 'git clone --depth 1 %s sqlmap')" % GIT_REPOSITORY
logger.error(errMsg)
else:
logger.error("update could not be completed ('%s')" % re.sub(r"\W+", " ", output).strip())
if not success:
if IS_WIN:
infoMsg = "for Windows platform it's recommended "
infoMsg += "to use a GitHub for Windows client for updating "
infoMsg += "purposes (https://desktop.github.com/) or just "
infoMsg += "download the latest snapshot from "
infoMsg += "https://github.com/sqlmapproject/sqlmap/downloads"
else:
infoMsg = "for Linux platform it's recommended "
infoMsg += "to install a standard 'git' package (e.g.: 'apt install git')"
logger.info(infoMsg)
| 782536.py | [
"CWE-78: Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')"
] |