Columns:
  text               string, 3 to 11.2M characters
  id                 string, 15 to 188 characters
  metadata           dict
  __index_level_0__  int64, 0 to 275
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10" project-jdk-type="Python SDK" />
</project>
.idea/misc.xml/0
{ "file_path": ".idea/misc.xml", "repo_id": ".idea", "token_count": 63 }
0
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="Black">
    <option name="sdkName" value="LLaMA-Factory" />
  </component>
  <component name="ProjectRootManager" version="2" project-jdk-name="LLaMA-Factory" project-jdk-type="Python SDK" />
</project>
LLaMA-Factory/.idea/misc.xml/0
{ "file_path": "LLaMA-Factory/.idea/misc.xml", "repo_id": "LLaMA-Factory", "token_count": 98 }
1
{ "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "zero_allow_untested_optimizer": true, "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "zero_optimization": { "stage": 3, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1000000000.0, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1000000000.0, "stage3_max_reuse_distance": 1000000000.0, "stage3_gather_16bit_weights_on_model_save": true } }
LLaMA-Factory/cache/ds_z3_config.json/0
{ "file_path": "LLaMA-Factory/cache/ds_z3_config.json", "repo_id": "LLaMA-Factory", "token_count": 354 }
2
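The row above is LLaMA-Factory's cached DeepSpeed ZeRO-3 configuration. The "auto" entries are placeholders that the Hugging Face Trainer's DeepSpeed integration resolves from the training arguments at launch time, and a training YAML typically points at such a file through a deepspeed: key. A minimal, hypothetical sketch (file path assumed) for inspecting the config:

import json

# Hypothetical path; the cached copy shown above sits under LLaMA-Factory/cache/.
with open("LLaMA-Factory/cache/ds_z3_config.json") as f:
    ds_config = json.load(f)

# Stage 3 means full parameter, gradient, and optimizer-state sharding.
assert ds_config["zero_optimization"]["stage"] == 3

# Fields left as "auto" are filled in by the HF Trainer from TrainingArguments at runtime.
auto_fields = [key for key, value in ds_config.items() if value == "auto"]
print("top-level auto fields:", auto_fields)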
{ "agronomy": { "name": "农学", "category": "Other" }, "anatomy": { "name": "解剖学", "category": "STEM" }, "ancient_chinese": { "name": "古汉语", "category": "Social Sciences" }, "arts": { "name": "艺术学", "category": "Humanities" }, "astronomy": { "name": "天文学", "category": "STEM" }, "business_ethics": { "name": "商业伦理", "category": "Social Sciences" }, "chinese_civil_service_exam": { "name": "中国公务员考试", "category": "Social Sciences" }, "chinese_driving_rule": { "name": "中国驾驶规则", "category": "Other" }, "chinese_food_culture": { "name": "中国饮食文化", "category": "Social Sciences" }, "chinese_foreign_policy": { "name": "中国外交政策", "category": "Social Sciences" }, "chinese_history": { "name": "中国历史", "category": "Humanities" }, "chinese_literature": { "name": "中国文学", "category": "Humanities" }, "chinese_teacher_qualification": { "name": "中国教师资格", "category": "Social Sciences" }, "college_actuarial_science": { "name": "大学精算学", "category": "STEM" }, "college_education": { "name": "大学教育学", "category": "Social Sciences" }, "college_engineering_hydrology": { "name": "大学工程水文学", "category": "STEM" }, "college_law": { "name": "大学法律", "category": "Humanities" }, "college_mathematics": { "name": "大学数学", "category": "STEM" }, "college_medical_statistics": { "name": "大学医学统计", "category": "STEM" }, "clinical_knowledge": { "name": "临床知识", "category": "Other" }, "college_medicine": { "name": "大学医学", "category": "Other" }, "computer_science": { "name": "计算机科学", "category": "STEM" }, "computer_security": { "name": "计算机安全", "category": "Other" }, "conceptual_physics": { "name": "概念物理学", "category": "STEM" }, "construction_project_management": { "name": "建设工程管理", "category": "Other" }, "economics": { "name": "经济学", "category": "Social Sciences" }, "education": { "name": "教育学", "category": "Social Sciences" }, "elementary_chinese": { "name": "小学语文", "category": "Social Sciences" }, "elementary_commonsense": { "name": "小学常识", "category": "Other" }, "elementary_information_and_technology": { "name": "小学信息技术", "category": "Other" }, "electrical_engineering": { "name": "电气工程", "category": "STEM" }, "elementary_mathematics": { "name": "初等数学", "category": "STEM" }, "ethnology": { "name": "民族学", "category": "Social Sciences" }, "food_science": { "name": "食品科学", "category": "Other" }, "genetics": { "name": "遗传学", "category": "STEM" }, "global_facts": { "name": "全球事实", "category": "Humanities" }, "high_school_biology": { "name": "高中生物", "category": "STEM" }, "high_school_chemistry": { "name": "高中化学", "category": "STEM" }, "high_school_geography": { "name": "高中地理", "category": "Social Sciences" }, "high_school_mathematics": { "name": "高中数学", "category": "STEM" }, "high_school_physics": { "name": "高中物理学", "category": "STEM" }, "high_school_politics": { "name": "高中政治", "category": "Social Sciences" }, "human_sexuality": { "name": "人类性行为", "category": "Other" }, "international_law": { "name": "国际法学", "category": "Humanities" }, "journalism": { "name": "新闻学", "category": "Social Sciences" }, "jurisprudence": { "name": "法理学", "category": "Humanities" }, "legal_and_moral_basis": { "name": "法律与道德基础", "category": "Other" }, "logical": { "name": "逻辑学", "category": "Humanities" }, "machine_learning": { "name": "机器学习", "category": "STEM" }, "management": { "name": "管理学", "category": "Social Sciences" }, "marketing": { "name": "市场营销", "category": "Social Sciences" }, "marxist_theory": { "name": "马克思主义理论", "category": "Humanities" }, "modern_chinese": { "name": "现代汉语", "category": "Social Sciences" }, "nutrition": { "name": "营养学", "category": "Other" 
}, "philosophy": { "name": "哲学", "category": "Humanities" }, "professional_accounting": { "name": "专业会计", "category": "Social Sciences" }, "professional_law": { "name": "专业法学", "category": "Humanities" }, "professional_medicine": { "name": "专业医学", "category": "Other" }, "professional_psychology": { "name": "专业心理学", "category": "Social Sciences" }, "public_relations": { "name": "公共关系", "category": "Social Sciences" }, "security_study": { "name": "安全研究", "category": "Social Sciences" }, "sociology": { "name": "社会学", "category": "Social Sciences" }, "sports_science": { "name": "体育学", "category": "Other" }, "traditional_chinese_medicine": { "name": "中医中药", "category": "Other" }, "virology": { "name": "病毒学", "category": "STEM" }, "world_history": { "name": "世界历史", "category": "Humanities" }, "world_religions": { "name": "世界宗教", "category": "Humanities" } }
LLaMA-Factory/evaluation/cmmlu/mapping.json/0
{ "file_path": "LLaMA-Factory/evaluation/cmmlu/mapping.json", "repo_id": "LLaMA-Factory", "token_count": 2730 }
3
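The mapping above is the subject table for CMMLU evaluation: each subject key carries a Chinese display name and a coarse category (STEM, Social Sciences, Humanities, Other). A small sketch, assuming only the JSON layout shown above, that groups subjects by category for score reporting:

import json
from collections import defaultdict

with open("LLaMA-Factory/evaluation/cmmlu/mapping.json", encoding="utf-8") as f:
    mapping = json.load(f)

by_category = defaultdict(list)
for subject, info in mapping.items():
    by_category[info["category"]].append(subject)

for category, subjects in sorted(by_category.items()):
    print(f"{category}: {len(subjects)} subjects")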
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct

### method
stage: sft
do_train: true
finetuning_type: full
use_galore: true
galore_layerwise: true
galore_target: mlp,self_attn
galore_rank: 128
galore_scale: 2.0

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 1
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
pure_bf16: true

### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
LLaMA-Factory/examples/extras/galore/llama3_full_sft.yaml/0
{ "file_path": "LLaMA-Factory/examples/extras/galore/llama3_full_sft.yaml", "repo_id": "LLaMA-Factory", "token_count": 315 }
4
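This config enables GaLore for full-parameter SFT (use_galore with galore_rank: 128 and galore_scale: 2.0 on the mlp and self_attn modules) and is launched the usual way, e.g. `llamafactory-cli train examples/extras/galore/llama3_full_sft.yaml`. The toy sketch below is not LLaMA-Factory's implementation; it only illustrates what the rank and scale parameters control: gradients of the targeted weight matrices are projected into a low-rank subspace before the optimizer step.

import torch

rank, scale = 128, 2.0            # mirrors galore_rank / galore_scale above
grad = torch.randn(1024, 1024)    # hypothetical gradient of a targeted weight matrix

U, _, _ = torch.linalg.svd(grad, full_matrices=False)
P = U[:, :rank]                       # projection basis, refreshed periodically in practice
low_rank_grad = P.T @ grad            # optimizer states live in this much smaller space
update = scale * (P @ low_rank_grad)  # projected back before being applied to the weight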
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
reward_model: saves/llama3-8b/lora/reward

### method
stage: ppo
do_train: true
finetuning_type: lora
lora_target: all

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/lora/ppo
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
ddp_timeout: 180000000

### generate
max_new_tokens: 512
top_k: 0
top_p: 0.9
LLaMA-Factory/examples/train_lora/llama3_lora_ppo.yaml/0
{ "file_path": "LLaMA-Factory/examples/train_lora/llama3_lora_ppo.yaml", "repo_id": "LLaMA-Factory", "token_count": 296 }
5
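Note that this PPO recipe depends on an earlier reward-modeling run: reward_model: saves/llama3-8b/lora/reward must already exist (it is the output_dir of a reward-stage config such as llama3_lora_reward.yaml). A minimal pre-flight sketch, with the file path assumed:

import os
import yaml  # pip install pyyaml

with open("LLaMA-Factory/examples/train_lora/llama3_lora_ppo.yaml") as f:
    cfg = yaml.safe_load(f)

# The PPO stage loads the reward adapter produced by the reward-modeling stage.
if not os.path.isdir(cfg["reward_model"]):
    raise FileNotFoundError("train the reward model before running the PPO stage")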
# Copyright 2024 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import concurrent.futures import os from threading import Thread from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Dict, List, Optional, Sequence, Tuple, Union import torch from transformers import GenerationConfig, TextIteratorStreamer from ..data import get_template_and_fix_tokenizer from ..extras.logging import get_logger from ..extras.misc import get_logits_processor from ..model import load_model, load_tokenizer from .base_engine import BaseEngine, Response if TYPE_CHECKING: from numpy.typing import NDArray from transformers import PreTrainedModel, PreTrainedTokenizer, ProcessorMixin from transformers.image_processing_utils import BaseImageProcessor from trl import PreTrainedModelWrapper from ..data import Template from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments logger = get_logger(__name__) class HuggingfaceEngine(BaseEngine): def __init__( self, model_args: "ModelArguments", data_args: "DataArguments", finetuning_args: "FinetuningArguments", generating_args: "GeneratingArguments", ) -> None: self.can_generate = finetuning_args.stage == "sft" tokenizer_module = load_tokenizer(model_args) self.tokenizer = tokenizer_module["tokenizer"] self.processor = tokenizer_module["processor"] self.tokenizer.padding_side = "left" if self.can_generate else "right" self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template, data_args.tool_format) self.model = load_model( self.tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=(not self.can_generate) ) # must after fixing tokenizer to resize vocab self.generating_args = generating_args.to_dict() try: asyncio.get_event_loop() except RuntimeError: logger.warning("There is no current event loop, creating a new one.") loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) self.semaphore = asyncio.Semaphore(int(os.environ.get("MAX_CONCURRENT", "1"))) @staticmethod def _process_args( model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer", processor: Optional["ProcessorMixin"], template: "Template", generating_args: Dict[str, Any], messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, image: Optional["NDArray"] = None, input_kwargs: Optional[Dict[str, Any]] = {}, ) -> Tuple[Dict[str, Any], int]: if ( processor is not None and image is not None and not hasattr(processor, "image_seq_length") and template.image_token not in messages[0]["content"] ): # llava-like models messages[0]["content"] = template.image_token + messages[0]["content"] paired_messages = messages + [{"role": "assistant", "content": ""}] system = system or generating_args["default_system"] pixel_values = None prompt_ids, _ = template.encode_oneturn( tokenizer=tokenizer, messages=paired_messages, system=system, tools=tools ) if processor is not None and image is not None: # add image features image_processor: "BaseImageProcessor" = getattr(processor, 
"image_processor") batch_feature = image_processor(image, return_tensors="pt") pixel_values = batch_feature.to(model.device)["pixel_values"] # shape (B, C, H, W) if hasattr(processor, "image_seq_length"): # paligemma models image_token_id = tokenizer.convert_tokens_to_ids(template.image_token) prompt_ids = [image_token_id] * getattr(processor, "image_seq_length") + prompt_ids prompt_length = len(prompt_ids) inputs = torch.tensor([prompt_ids], device=model.device) attention_mask = torch.ones_like(inputs, dtype=torch.bool) do_sample: Optional[bool] = input_kwargs.pop("do_sample", None) temperature: Optional[float] = input_kwargs.pop("temperature", None) top_p: Optional[float] = input_kwargs.pop("top_p", None) top_k: Optional[float] = input_kwargs.pop("top_k", None) num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1) repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None) length_penalty: Optional[float] = input_kwargs.pop("length_penalty", None) max_length: Optional[int] = input_kwargs.pop("max_length", None) max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None) stop: Optional[Union[str, List[str]]] = input_kwargs.pop("stop", None) if stop is not None: logger.warning("Stop parameter is not supported in Huggingface engine yet.") generating_args = generating_args.copy() generating_args.update( dict( do_sample=do_sample if do_sample is not None else generating_args["do_sample"], temperature=temperature if temperature is not None else generating_args["temperature"], top_p=top_p if top_p is not None else generating_args["top_p"], top_k=top_k if top_k is not None else generating_args["top_k"], num_return_sequences=num_return_sequences, repetition_penalty=repetition_penalty if repetition_penalty is not None else generating_args["repetition_penalty"], length_penalty=length_penalty if length_penalty is not None else generating_args["length_penalty"], eos_token_id=[tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids, pad_token_id=tokenizer.pad_token_id, ) ) if isinstance(num_return_sequences, int) and num_return_sequences > 1: # do_sample needs temperature > 0 generating_args["do_sample"] = True generating_args["temperature"] = generating_args["temperature"] or 1.0 if not generating_args["temperature"]: generating_args["do_sample"] = False if not generating_args["do_sample"]: generating_args.pop("temperature", None) generating_args.pop("top_p", None) if max_length: generating_args.pop("max_new_tokens", None) generating_args["max_length"] = max_length if max_new_tokens: generating_args.pop("max_length", None) generating_args["max_new_tokens"] = max_new_tokens gen_kwargs = dict( inputs=inputs, attention_mask=attention_mask, generation_config=GenerationConfig(**generating_args), logits_processor=get_logits_processor(), ) if pixel_values is not None: gen_kwargs["pixel_values"] = pixel_values return gen_kwargs, prompt_length @staticmethod @torch.inference_mode() def _chat( model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer", processor: Optional["ProcessorMixin"], template: "Template", generating_args: Dict[str, Any], messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, image: Optional["NDArray"] = None, input_kwargs: Optional[Dict[str, Any]] = {}, ) -> List["Response"]: gen_kwargs, prompt_length = HuggingfaceEngine._process_args( model, tokenizer, processor, template, generating_args, messages, system, tools, image, input_kwargs ) generate_output = model.generate(**gen_kwargs) 
response_ids = generate_output[:, prompt_length:] response = tokenizer.batch_decode(response_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) results = [] for i in range(len(response)): eos_index = (response_ids[i] == tokenizer.eos_token_id).nonzero() response_length = (eos_index[0].item() + 1) if len(eos_index) else len(response_ids[i]) results.append( Response( response_text=response[i], response_length=response_length, prompt_length=prompt_length, finish_reason="stop" if len(eos_index) else "length", ) ) return results @staticmethod @torch.inference_mode() def _stream_chat( model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer", processor: Optional["ProcessorMixin"], template: "Template", generating_args: Dict[str, Any], messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, image: Optional["NDArray"] = None, input_kwargs: Optional[Dict[str, Any]] = {}, ) -> Callable[[], str]: gen_kwargs, _ = HuggingfaceEngine._process_args( model, tokenizer, processor, template, generating_args, messages, system, tools, image, input_kwargs ) streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) gen_kwargs["streamer"] = streamer thread = Thread(target=model.generate, kwargs=gen_kwargs, daemon=True) thread.start() def stream(): try: return streamer.__next__() except StopIteration: raise StopAsyncIteration() return stream @staticmethod @torch.inference_mode() def _get_scores( model: "PreTrainedModelWrapper", tokenizer: "PreTrainedTokenizer", batch_input: List[str], input_kwargs: Optional[Dict[str, Any]] = {}, ) -> List[float]: max_length = input_kwargs.pop("max_length", None) device = getattr(model.pretrained_model, "device", "cuda") inputs = tokenizer( batch_input, padding=True, truncation=True, max_length=max_length or getattr(model.config, "max_position_embeddings", 1024), return_tensors="pt", add_special_tokens=True, ).to(device) input_ids: torch.Tensor = inputs["input_ids"] _, _, values = model(**inputs, output_hidden_states=True, return_dict=True) if getattr(model.config, "model_type", None) == "chatglm": values = torch.transpose(values, 0, 1) scores = [] for i in range(input_ids.size(0)): end_indexes = (input_ids[i] != tokenizer.pad_token_id).nonzero() end_index = end_indexes[-1].item() if len(end_indexes) else 0 scores.append(values[i, end_index].nan_to_num().item()) return scores async def chat( self, messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, image: Optional["NDArray"] = None, **input_kwargs, ) -> List["Response"]: if not self.can_generate: raise ValueError("The current model does not support `chat`.") loop = asyncio.get_running_loop() input_args = ( self.model, self.tokenizer, self.processor, self.template, self.generating_args, messages, system, tools, image, input_kwargs, ) async with self.semaphore: with concurrent.futures.ThreadPoolExecutor() as pool: return await loop.run_in_executor(pool, self._chat, *input_args) async def stream_chat( self, messages: Sequence[Dict[str, str]], system: Optional[str] = None, tools: Optional[str] = None, image: Optional["NDArray"] = None, **input_kwargs, ) -> AsyncGenerator[str, None]: if not self.can_generate: raise ValueError("The current model does not support `stream_chat`.") loop = asyncio.get_running_loop() input_args = ( self.model, self.tokenizer, self.processor, self.template, self.generating_args, messages, system, tools, image, input_kwargs, ) async with self.semaphore: with 
concurrent.futures.ThreadPoolExecutor() as pool: stream = self._stream_chat(*input_args) while True: try: yield await loop.run_in_executor(pool, stream) except StopAsyncIteration: break async def get_scores( self, batch_input: List[str], **input_kwargs, ) -> List[float]: if self.can_generate: raise ValueError("Cannot get scores using an auto-regressive model.") loop = asyncio.get_running_loop() input_args = (self.model, self.tokenizer, batch_input, input_kwargs) async with self.semaphore: with concurrent.futures.ThreadPoolExecutor() as pool: return await loop.run_in_executor(pool, self._get_scores, *input_args)
LLaMA-Factory/src/llamafactory/chat/hf_engine.py/0
{ "file_path": "LLaMA-Factory/src/llamafactory/chat/hf_engine.py", "repo_id": "LLaMA-Factory", "token_count": 6075 }
6
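HuggingfaceEngine above exposes async chat, stream_chat, and get_scores methods that run generation in a thread pool behind a semaphore sized by MAX_CONCURRENT. A minimal sketch of driving it directly; how the four hyper-parameter dataclasses are built (CLI, YAML, or manual construction) is assumed to happen elsewhere, since LLaMA-Factory normally constructs them from a config:

import asyncio

from llamafactory.chat.hf_engine import HuggingfaceEngine

async def main(model_args, data_args, finetuning_args, generating_args):
    # The four *_args objects are the parsed hyper-parameter dataclasses.
    engine = HuggingfaceEngine(model_args, data_args, finetuning_args, generating_args)
    messages = [{"role": "user", "content": "Hello, who are you?"}]
    responses = await engine.chat(messages)
    print(responses[0].response_text)

# asyncio.run(main(model_args, data_args, finetuning_args, generating_args))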
# Copyright 2024 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import re from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import Any, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union SLOTS = Sequence[Union[str, Set[str], Dict[str, str]]] DEFAULT_TOOL_PROMPT = ( "You have access to the following tools:\n{tool_text}" "Use the following format if using a tool:\n" "```\n" "Action: tool name (one of [{tool_names}]).\n" "Action Input: the input to the tool, in a JSON format representing the kwargs " """(e.g. ```{{"input": "hello world", "num_beams": 5}}```).\n""" "```\n" ) GLM4_TOOL_PROMPT = ( "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的," "你的任务是针对用户的问题和要求提供适当的答复和支持。# 可用工具{tool_text}" ) def default_tool_formatter(tools: List[Dict[str, Any]]) -> str: tool_text = "" tool_names = [] for tool in tools: param_text = "" for name, param in tool["parameters"]["properties"].items(): required = ", required" if name in tool["parameters"].get("required", []) else "" enum = ", should be one of [{}]".format(", ".join(param["enum"])) if param.get("enum", None) else "" items = ( ", where each item should be {}".format(param["items"].get("type", "")) if param.get("items") else "" ) param_text += " - {name} ({type}{required}): {desc}{enum}{items}\n".format( name=name, type=param.get("type", ""), required=required, desc=param.get("description", ""), enum=enum, items=items, ) tool_text += "> Tool Name: {name}\nTool Description: {desc}\nTool Args:\n{args}\n".format( name=tool["name"], desc=tool.get("description", ""), args=param_text ) tool_names.append(tool["name"]) return DEFAULT_TOOL_PROMPT.format(tool_text=tool_text, tool_names=", ".join(tool_names)) def default_tool_extractor(content: str) -> Union[str, List[Tuple[str, str]]]: regex = re.compile(r"Action:\s*([a-zA-Z0-9_]+)\s*Action Input:\s*(.+?)(?=\s*Action:|\s*$)", re.DOTALL) action_match: List[Tuple[str, str]] = re.findall(regex, content) if not action_match: return content results = [] for match in action_match: tool_name = match[0].strip() tool_input = match[1].strip().strip('"').strip("```") try: arguments = json.loads(tool_input) results.append((tool_name, json.dumps(arguments, ensure_ascii=False))) except json.JSONDecodeError: return content return results def glm4_tool_formatter(tools: List[Dict[str, Any]]) -> str: tool_text = "" for tool in tools: tool_text += "\n\n## {name}\n\n{body}\n在调用上述函数时,请使用 Json 格式表示调用的参数。".format( name=tool["name"], body=json.dumps(tool, indent=4, ensure_ascii=False) ) return GLM4_TOOL_PROMPT.format(tool_text=tool_text) def glm4_tool_extractor(content: str) -> Union[str, List[Tuple[str, str]]]: if "\n" not in content: return content tool_name, tool_input = content.split("\n", maxsplit=1) try: arguments = json.loads(tool_input) except json.JSONDecodeError: return content return [(tool_name, json.dumps(arguments, ensure_ascii=False))] @dataclass class Formatter(ABC): slots: SLOTS = field(default_factory=list) tool_format: Optional[Literal["default", 
"glm4"]] = None @abstractmethod def apply(self, **kwargs) -> SLOTS: ... def extract(self, content: str) -> Union[str, List[Tuple[str, str]]]: raise NotImplementedError @dataclass class EmptyFormatter(Formatter): def __post_init__(self): has_placeholder = False for slot in filter(lambda s: isinstance(s, str), self.slots): if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot): has_placeholder = True if has_placeholder: raise ValueError("Empty formatter should not contain any placeholder.") def apply(self, **kwargs) -> SLOTS: return self.slots @dataclass class StringFormatter(Formatter): def __post_init__(self): has_placeholder = False for slot in filter(lambda s: isinstance(s, str), self.slots): if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot): has_placeholder = True if not has_placeholder: raise ValueError("A placeholder is required in the string formatter.") def apply(self, **kwargs) -> SLOTS: elements = [] for slot in self.slots: if isinstance(slot, str): for name, value in kwargs.items(): if not isinstance(value, str): raise RuntimeError("Expected a string, got {}".format(value)) slot = slot.replace("{{" + name + "}}", value, 1) elements.append(slot) elif isinstance(slot, (dict, set)): elements.append(slot) else: raise RuntimeError("Input must be string, set[str] or dict[str, str], got {}".format(type(slot))) return elements @dataclass class FunctionFormatter(Formatter): def __post_init__(self): has_name, has_args = False, False for slot in filter(lambda s: isinstance(s, str), self.slots): if "{{name}}" in slot: has_name = True if "{{arguments}}" in slot: has_args = True if not has_name or not has_args: raise ValueError("Name and arguments placeholders are required in the function formatter.") def apply(self, **kwargs) -> SLOTS: content = kwargs.pop("content") functions: List[Tuple[str, str]] = [] try: tool_calls = json.loads(content) if not isinstance(tool_calls, list): # parallel function call tool_calls = [tool_calls] for tool_call in tool_calls: functions.append((tool_call["name"], json.dumps(tool_call["arguments"], ensure_ascii=False))) except json.JSONDecodeError: functions = [] elements = [] for name, arguments in functions: for slot in self.slots: if isinstance(slot, str): slot = slot.replace("{{name}}", name).replace("{{arguments}}", arguments) elements.append(slot) elif isinstance(slot, (dict, set)): elements.append(slot) else: raise RuntimeError("Input must be string, set[str] or dict[str, str], got {}".format(type(slot))) return elements @dataclass class ToolFormatter(Formatter): def __post_init__(self): if self.tool_format == "default": self._tool_formatter = default_tool_formatter self._tool_extractor = default_tool_extractor elif self.tool_format == "glm4": self._tool_formatter = glm4_tool_formatter self._tool_extractor = glm4_tool_extractor else: raise NotImplementedError("Tool format {} was not found.".format(self.tool_format)) def apply(self, **kwargs) -> SLOTS: content = kwargs.pop("content") try: tools = json.loads(content) return [self._tool_formatter(tools) if len(tools) != 0 else ""] except json.JSONDecodeError: return [""] def extract(self, content: str) -> Union[str, List[Tuple[str, str]]]: return self._tool_extractor(content)
LLaMA-Factory/src/llamafactory/data/formatter.py/0
{ "file_path": "LLaMA-Factory/src/llamafactory/data/formatter.py", "repo_id": "LLaMA-Factory", "token_count": 3734 }
7
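The default tool formatter/extractor pair above defines a simple round trip: tool schemas are rendered into the prompt via DEFAULT_TOOL_PROMPT, and an "Action: ... Action Input: {...}" completion is parsed back into (name, arguments) tuples. A small sketch exercising both, using only functions defined in this file; the tool schema itself is made up:

from llamafactory.data.formatter import default_tool_formatter, default_tool_extractor

tools = [{
    "name": "get_weather",                      # hypothetical tool
    "description": "Query current weather",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string", "description": "city name"}},
        "required": ["city"],
    },
}]

prompt = default_tool_formatter(tools)  # rendered into the system prompt
completion = 'Action: get_weather\nAction Input: {"city": "Beijing"}'
print(default_tool_extractor(completion))  # [('get_weather', '{"city": "Beijing"}')]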
# Copyright 2024 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple from ...extras.constants import IGNORE_INDEX from ...extras.logging import get_logger from .processor_utils import get_paligemma_token_type_ids, get_pixel_values, greedy_knapsack if TYPE_CHECKING: from transformers import PreTrainedTokenizer, ProcessorMixin from ...hparams import DataArguments from ..template import Template logger = get_logger(__name__) def _encode_supervised_example( prompt: Sequence[Dict[str, str]], response: Sequence[Dict[str, str]], system: Optional[str], tools: Optional[str], template: "Template", tokenizer: "PreTrainedTokenizer", processor: Optional["ProcessorMixin"], data_args: "DataArguments", ) -> Tuple[List[int], List[int]]: if processor is not None and not hasattr(processor, "image_seq_length"): # llava-like models prompt[0]["content"] = template.image_token + prompt[0]["content"] messages = prompt + response input_ids, labels = [], [] if processor is not None and hasattr(processor, "image_seq_length"): # paligemma models image_token_id = tokenizer.convert_tokens_to_ids(template.image_token) input_ids += [image_token_id] * getattr(processor, "image_seq_length") labels += [IGNORE_INDEX] * getattr(processor, "image_seq_length") encoded_pairs = template.encode_multiturn( tokenizer, messages, system, tools, data_args.cutoff_len, data_args.reserved_label_len ) for turn_idx, (source_ids, target_ids) in enumerate(encoded_pairs): if data_args.train_on_prompt: source_mask = source_ids elif turn_idx != 0 and template.efficient_eos: source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1) else: source_mask = [IGNORE_INDEX] * len(source_ids) input_ids += source_ids + target_ids labels += source_mask + target_ids if template.efficient_eos: input_ids += [tokenizer.eos_token_id] labels += [tokenizer.eos_token_id] return input_ids, labels def preprocess_supervised_dataset( examples: Dict[str, List[Any]], template: "Template", tokenizer: "PreTrainedTokenizer", processor: Optional["ProcessorMixin"], data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>` # for multiturn examples, we only mask the prompt part in each prompt-response pair. 
model_inputs = {"input_ids": [], "attention_mask": [], "labels": []} if processor is not None: model_inputs["pixel_values"] = [] if hasattr(processor, "image_seq_length"): # paligemma models model_inputs["token_type_ids"] = [] for i in range(len(examples["prompt"])): if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1: logger.warning("Dropped invalid example: {}".format(examples["prompt"][i] + examples["response"][i])) continue input_ids, labels = _encode_supervised_example( prompt=examples["prompt"][i], response=examples["response"][i], system=examples["system"][i], tools=examples["tools"][i], template=template, tokenizer=tokenizer, processor=processor, data_args=data_args, ) model_inputs["input_ids"].append(input_ids) model_inputs["attention_mask"].append([1] * len(input_ids)) model_inputs["labels"].append(labels) if processor is not None: model_inputs["pixel_values"].append(get_pixel_values(examples["images"][i], processor)) if hasattr(processor, "image_seq_length"): # paligemma models model_inputs["token_type_ids"].append(get_paligemma_token_type_ids(len(input_ids), processor)) return model_inputs def preprocess_packed_supervised_dataset( examples: Dict[str, List[Any]], template: "Template", tokenizer: "PreTrainedTokenizer", data_args: "DataArguments", ) -> Dict[str, List[List[int]]]: # build inputs with format `<bos> X1 Y1 <eos> <bos> X2 Y2 <eos>` # and labels with format `<ignore> ... <ignore> Y1 <eos> <ignore> ... <ignore> Y2 <eos>` valid_num = 0 batch_input_ids, batch_labels = [], [] lengths = [] length2indexes = defaultdict(list) for i in range(len(examples["prompt"])): if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1: logger.warning("Dropped invalid example: {}".format(examples["prompt"][i] + examples["response"][i])) continue input_ids, labels = _encode_supervised_example( prompt=examples["prompt"][i], response=examples["response"][i], system=examples["system"][i], tools=examples["tools"][i], template=template, tokenizer=tokenizer, processor=None, data_args=data_args, ) length = len(input_ids) if length > data_args.cutoff_len: logger.warning("Dropped lengthy example with length {} > {}.".format(length, data_args.cutoff_len)) else: lengths.append(length) length2indexes[length].append(valid_num) batch_input_ids.append(input_ids) batch_labels.append(labels) valid_num += 1 model_inputs = {"input_ids": [], "attention_mask": [], "labels": []} knapsacks = greedy_knapsack(lengths, data_args.cutoff_len) for knapsack in knapsacks: packed_input_ids, packed_labels = [], [] for length in knapsack: index = length2indexes[length].pop() packed_input_ids += batch_input_ids[index] packed_labels += batch_labels[index] if len(packed_input_ids) < data_args.cutoff_len: pad_length = data_args.cutoff_len - len(packed_input_ids) packed_input_ids += [tokenizer.pad_token_id] * pad_length packed_labels += [IGNORE_INDEX] * pad_length if len(packed_input_ids) != data_args.cutoff_len: raise ValueError("The length of packed example should be identical to the cutoff length.") model_inputs["input_ids"].append(packed_input_ids) model_inputs["attention_mask"].append([1] * data_args.cutoff_len) model_inputs["labels"].append(packed_labels) return model_inputs def print_supervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None: valid_labels = list(filter(lambda x: x != IGNORE_INDEX, example["labels"])) print("input_ids:\n{}".format(example["input_ids"])) print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], 
skip_special_tokens=False))) print("label_ids:\n{}".format(example["labels"])) print("labels:\n{}".format(tokenizer.decode(valid_labels, skip_special_tokens=False)))
LLaMA-Factory/src/llamafactory/data/processors/supervised.py/0
{ "file_path": "LLaMA-Factory/src/llamafactory/data/processors/supervised.py", "repo_id": "LLaMA-Factory", "token_count": 3099 }
8
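The core of _encode_supervised_example above is the label mask: prompt (source) tokens become IGNORE_INDEX so only response (target) tokens contribute to the loss, unless train_on_prompt is set (the real code additionally handles efficient_eos and multi-turn offsets). A toy, self-contained illustration of that masking rule with made-up token ids:

IGNORE_INDEX = -100  # same constant LLaMA-Factory uses for tokens ignored by the loss

def mask_labels(source_ids, target_ids, train_on_prompt=False):
    input_ids = source_ids + target_ids
    source_mask = source_ids if train_on_prompt else [IGNORE_INDEX] * len(source_ids)
    labels = source_mask + target_ids
    return input_ids, labels

# One prompt/response turn with fake token ids:
print(mask_labels([1, 2, 3], [4, 5]))
# ([1, 2, 3, 4, 5], [-100, -100, -100, 4, 5])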
# Copyright 2024 EleutherAI, HuggingFace Inc., Yukang Chen, and the LlamaFactory team. # # This code is based on the EleutherAI's GPT-NeoX and the HuggingFace's Transformers libraries. # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llama/modeling_llama.py # This code is also inspired by the original LongLoRA implementation. # https://github.com/dvlab-research/LongLoRA/blob/main/llama_attn_replace.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import TYPE_CHECKING, Optional, Tuple import torch import torch.nn as nn from transformers.models.llama.modeling_llama import ( Cache, LlamaAttention, LlamaFlashAttention2, LlamaSdpaAttention, apply_rotary_pos_emb, repeat_kv, ) from transformers.utils import logging from transformers.utils.versions import require_version from ...extras.constants import SUPPORTED_CLASS_FOR_S2ATTN from ...extras.logging import get_logger if TYPE_CHECKING: from transformers import PretrainedConfig from ...hparams import ModelArguments logger = logging.get_logger(__name__) # Modified from: # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llama/modeling_llama.py def llama_attention_forward( self: "LlamaAttention", hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional["Cache"] = None, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states: "torch.Tensor" = self.q_proj(hidden_states) key_states: "torch.Tensor" = self.k_proj(hidden_states) value_states: "torch.Tensor" = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) past_key_value = getattr(self, "past_key_value", past_key_value) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if getattr(self.config, "group_size_ratio", None) and self.training: # shift groupsz = int(q_len * getattr(self.config, "group_size_ratio")) assert q_len % groupsz == 0, "q_len {} should be divisible by group size {}.".format(q_len, groupsz) num_groups = q_len // groupsz def shift(state: torch.Tensor) -> torch.Tensor: state = state.transpose(1, 2) # output: (bsz, seq_len, n_heads, head_dim) state = torch.cat( (state[:, :, : 
self.num_heads // 2], state[:, :, self.num_heads // 2 :].roll(-groupsz // 2, dims=1)), dim=2, ) return state.reshape(bsz * num_groups, groupsz, self.num_heads, self.head_dim).transpose(1, 2) query_states, key_states, value_states = shift(query_states), shift(key_states), shift(value_states) if attention_mask is not None: attention_mask = attention_mask[:, :, :groupsz, :groupsz].repeat(num_groups, 1, 1, 1) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) # (bsz, :, seq_len, :) or (bsz * n_group, :, groupsz, :) attn_output = attn_output.transpose(1, 2).contiguous() if getattr(self.config, "group_size_ratio", None) and self.training: # shift back attn_output.reshape(bsz, q_len, self.num_heads, self.head_dim) attn_output = torch.cat( ( attn_output[:, :, : self.num_heads // 2], attn_output[:, :, self.num_heads // 2 :].roll(groupsz // 2, dims=1), ), dim=2, ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # Modified from: # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llama/modeling_llama.py def llama_flash_attention_2_forward( self: "LlamaFlashAttention2", hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional["Cache"] = None, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # LlamaFlashAttention2 attention does not support output_attentions output_attentions = False bsz, q_len, _ = hidden_states.size() query_states: "torch.Tensor" = self.q_proj(hidden_states) key_states: "torch.Tensor" = self.k_proj(hidden_states) value_states: "torch.Tensor" = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) past_key_value = getattr(self, "past_key_value", past_key_value) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) # FlashAttention requires the input to have the shape (bsz, seq_len, n_heads, head_dim) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 input_dtype = 
query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once("The input hidden states seems to be silently casted in float32.") query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) if getattr(self.config, "group_size_ratio", None) and self.training: # shift groupsz = int(q_len * getattr(self.config, "group_size_ratio")) assert q_len % groupsz == 0, "q_len {} should be divisible by group size {}.".format(q_len, groupsz) num_groups = q_len // groupsz def shift(state: torch.Tensor) -> torch.Tensor: state = torch.cat( (state[:, :, : self.num_heads // 2], state[:, :, self.num_heads // 2 :].roll(-groupsz // 2, dims=1)), dim=2, ) return state.reshape(bsz * num_groups, groupsz, self.num_heads, self.head_dim) query_states, key_states, value_states = shift(query_states), shift(key_states), shift(value_states) if attention_mask is not None: attention_mask = attention_mask[:, :groupsz].repeat(num_groups, 1) attn_output: torch.Tensor = self._flash_attention_forward( query_states, key_states, value_states, attention_mask, query_states.size(1), dropout=dropout_rate ) if getattr(self.config, "group_size_ratio", None) and self.training: # shift back attn_output.reshape(bsz, q_len, self.num_heads, self.head_dim) attn_output = torch.cat( ( attn_output[:, :, : self.num_heads // 2], attn_output[:, :, self.num_heads // 2 :].roll(groupsz // 2, dims=1), ), dim=2, ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # Modified from: # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llama/modeling_llama.py def llama_sdpa_attention_forward( self: "LlamaSdpaAttention", hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional["Cache"] = None, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: logger.warning_once("SDPA does not support `output_attentions=True`. 
Falling back to the vanilla attention") return llama_attention_forward( self, hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) bsz, q_len, _ = hidden_states.size() query_states: "torch.Tensor" = self.q_proj(hidden_states) key_states: "torch.Tensor" = self.k_proj(hidden_states) value_states: "torch.Tensor" = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if getattr(self.config, "group_size_ratio", None) and self.training: # shift groupsz = int(q_len * getattr(self.config, "group_size_ratio")) assert q_len % groupsz == 0, "q_len {} should be divisible by group size {}.".format(q_len, groupsz) num_groups = q_len // groupsz def shift(state: torch.Tensor) -> torch.Tensor: state = state.transpose(1, 2) # output: (bsz, seq_len, n_heads, head_dim) state = torch.cat( (state[:, :, : self.num_heads // 2], state[:, :, self.num_heads // 2 :].roll(-groupsz // 2, dims=1)), dim=2, ) return state.reshape(bsz * num_groups, groupsz, self.num_heads, self.head_dim).transpose(1, 2) query_states, key_states, value_states = shift(query_states), shift(key_states), shift(value_states) if attention_mask is not None: attention_mask = attention_mask[:, :, :groupsz, :groupsz].repeat(num_groups, 1, 1, 1) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] if query_states.device.type == "cuda" and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=causal_mask is None and q_len > 1, ) attn_output = attn_output.transpose(1, 2).contiguous() if getattr(self.config, "group_size_ratio", None) and self.training: # shift back attn_output.reshape(bsz, q_len, self.num_heads, self.head_dim) attn_output = torch.cat( ( attn_output[:, :, : self.num_heads // 2], attn_output[:, :, self.num_heads // 2 :].roll(groupsz // 2, dims=1), ), dim=2, ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) return attn_output, None, past_key_value def _apply_llama_patch() -> None: require_version("transformers==4.41.2", "To fix: pip install transformers==4.41.2") LlamaAttention.forward = llama_attention_forward LlamaFlashAttention2.forward = llama_flash_attention_2_forward LlamaSdpaAttention.forward = llama_sdpa_attention_forward def configure_longlora(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None: if not is_trainable or not 
model_args.shift_attn: return logger = get_logger(__name__) if getattr(config, "model_type", None) in SUPPORTED_CLASS_FOR_S2ATTN: setattr(config, "group_size_ratio", 0.25) _apply_llama_patch() logger.info("Using shift short attention with group_size_ratio=1/4.") else: logger.warning("Current model does not support shift short attention.")
LLaMA-Factory/src/llamafactory/model/model_utils/longlora.py/0
{ "file_path": "LLaMA-Factory/src/llamafactory/model/model_utils/longlora.py", "repo_id": "LLaMA-Factory", "token_count": 6273 }
9
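The three patched attention forwards above share one trick, shift short attention (S2-Attn): the sequence is split into groups of groupsz tokens, half of the heads are rolled by half a group so neighbouring groups still exchange information, and everything is reshaped so each group attends only within itself. A toy tensor-shape sketch of the shift helper, mirroring the flash-attention variant in the file; the shapes are made up:

import torch

bsz, q_len, n_heads, head_dim = 2, 8, 4, 16
group_size_ratio = 0.25
groupsz = int(q_len * group_size_ratio)   # 2
num_groups = q_len // groupsz             # 4

state = torch.randn(bsz, q_len, n_heads, head_dim)  # (bsz, seq_len, n_heads, head_dim)

# Roll the second half of the heads by half a group along the sequence axis,
# then fold groups into the batch dimension so attention stays within a group.
shifted = torch.cat(
    (state[:, :, : n_heads // 2], state[:, :, n_heads // 2 :].roll(-groupsz // 2, dims=1)),
    dim=2,
)
shifted = shifted.reshape(bsz * num_groups, groupsz, n_heads, head_dim)
print(shifted.shape)  # torch.Size([8, 2, 4, 16])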
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's TRL library. # https://github.com/huggingface/trl/blob/v0.8.0/trl/trainer/ppo_trainer.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import os import sys import warnings from types import MethodType from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import torch from accelerate.utils import DistributedDataParallelKwargs from tqdm import tqdm from transformers import GenerationConfig, Trainer, TrainerControl, TrainerState from transformers.optimization import get_scheduler from transformers.trainer_pt_utils import remove_dummy_checkpoint from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR from transformers.utils import SAFE_WEIGHTS_NAME, WEIGHTS_NAME from trl import PPOConfig, PPOTrainer from trl.core import PPODecorators, logprobs_from_logits from trl.models.utils import unwrap_model_for_generation from ...extras.callbacks import FixValueHeadModelCallback, LogCallback from ...extras.logging import get_logger from ...extras.misc import AverageMeter, count_parameters, get_current_device, get_logits_processor from ..trainer_utils import create_custom_optimzer, create_custom_scheduler from .ppo_utils import dump_layernorm, get_rewards_from_server, replace_model, restore_layernorm if TYPE_CHECKING: from datasets import Dataset from transformers import ( DataCollatorWithPadding, PreTrainedTokenizer, ProcessorMixin, Seq2SeqTrainingArguments, TrainerCallback, ) from trl import AutoModelForCausalLMWithValueHead from ...hparams import FinetuningArguments, GeneratingArguments, ModelArguments logger = get_logger(__name__) class CustomPPOTrainer(PPOTrainer, Trainer): r""" Inherits PPOTrainer. 
""" def __init__( self, model_args: "ModelArguments", training_args: "Seq2SeqTrainingArguments", finetuning_args: "FinetuningArguments", generating_args: "GeneratingArguments", callbacks: List["TrainerCallback"], model: "AutoModelForCausalLMWithValueHead", reward_model: Optional["AutoModelForCausalLMWithValueHead"], ref_model: Optional["AutoModelForCausalLMWithValueHead"], tokenizer: "PreTrainedTokenizer", processor: Optional["ProcessorMixin"], dataset: "Dataset", data_collator: "DataCollatorWithPadding", ): backward_batch_size = training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps ppo_config = PPOConfig( model_name=model_args.model_name_or_path, learning_rate=training_args.learning_rate, mini_batch_size=training_args.per_device_train_batch_size, batch_size=backward_batch_size * finetuning_args.ppo_buffer_size, gradient_accumulation_steps=training_args.gradient_accumulation_steps, ppo_epochs=finetuning_args.ppo_epochs, max_grad_norm=training_args.max_grad_norm, seed=training_args.seed, optimize_device_cache=True, target=finetuning_args.ppo_target, use_score_scaling=finetuning_args.ppo_score_norm, use_score_norm=finetuning_args.ppo_score_norm, whiten_rewards=finetuning_args.ppo_whiten_rewards, accelerator_kwargs={"step_scheduler_with_optimizer": False}, log_with=training_args.report_to[0] if training_args.report_to else None, project_kwargs={"logging_dir": training_args.logging_dir}, ) # Add deepspeed config if training_args.deepspeed_plugin is not None: ppo_config.accelerator_kwargs["kwargs_handlers"] = [ DistributedDataParallelKwargs(find_unused_parameters=training_args.ddp_find_unused_parameters) ] ppo_config.accelerator_kwargs["deepspeed_plugin"] = training_args.deepspeed_plugin # Create optimizer and scheduler if training_args.max_steps > 0: num_training_steps = training_args.max_steps else: total_train_batch_size = backward_batch_size * finetuning_args.ppo_buffer_size * training_args.world_size num_training_steps = training_args.num_train_epochs * math.ceil(len(dataset) / total_train_batch_size) optimizer = self.create_optimizer(model, training_args, finetuning_args) scheduler = self.create_scheduler(training_args, num_training_steps, optimizer) PPOTrainer.__init__( self, config=ppo_config, model=model, ref_model=ref_model, tokenizer=tokenizer, dataset=dataset, data_collator=data_collator, lr_scheduler=scheduler, ) self.args = training_args self.model_args = model_args self.finetuning_args = finetuning_args self.reward_model = reward_model self.current_device = get_current_device() # patch for deepspeed training self.processor = processor self.generation_config = GenerationConfig( pad_token_id=self.tokenizer.pad_token_id, eos_token_id=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids, **generating_args.to_dict(), ) self.state = TrainerState() self.control = TrainerControl() self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None self.log_callback, self.save_callback = callbacks[0], callbacks[1] assert isinstance(self.log_callback, LogCallback) and isinstance(self.save_callback, FixValueHeadModelCallback) if self.args.max_steps > 0: logger.info("max_steps is given, it will override any value given in num_train_epochs") unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model) self.is_chatglm_model = getattr(unwrapped_model.config, "model_type", None) == 
"chatglm" self.amp_context = torch.autocast(self.current_device.type, dtype=self.model_args.compute_dtype) warnings.simplefilter("ignore") # remove gc warnings on ref model if finetuning_args.reward_model_type == "full": if self.is_deepspeed_enabled: if not ( getattr(reward_model.pretrained_model, "is_loaded_in_8bit", False) or getattr(reward_model.pretrained_model, "is_loaded_in_4bit", False) ): # quantized models are already set on the correct device self.reward_model = self._prepare_deepspeed(self.reward_model) else: self.reward_model = self.accelerator.prepare_model(self.reward_model, evaluation_mode=True) if finetuning_args.use_badam: from badam import BAdamCallback, clip_grad_norm_old_version self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator) self.callback_handler.add_callback(BAdamCallback) def ppo_train(self, resume_from_checkpoint: Optional[str] = None) -> None: r""" Implements training loop for the PPO stage, like _inner_training_loop() in Huggingface's Trainer. """ if resume_from_checkpoint is not None: raise ValueError("`resume_from_checkpoint` will be supported in the future version.") total_train_batch_size = ( self.args.per_device_train_batch_size * self.args.gradient_accumulation_steps * self.finetuning_args.ppo_buffer_size * self.args.world_size ) if self.args.max_steps > 0: num_examples = total_train_batch_size * self.args.max_steps num_train_epochs = sys.maxsize max_steps = self.args.max_steps steps_in_epoch = self.args.max_steps else: len_dataloader = len(self.dataloader) num_examples = len(self.dataset) num_train_epochs = self.args.num_train_epochs max_steps = math.ceil(num_train_epochs * len_dataloader) steps_in_epoch = len_dataloader self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() if self.is_world_process_zero(): logger.info("***** Running training *****") logger.info(" Num examples = {:,}".format(num_examples)) logger.info(" Num Epochs = {:,}".format(num_train_epochs)) logger.info(" Instantaneous batch size per device = {:,}".format(self.args.per_device_train_batch_size)) logger.info( " Total train batch size (w. 
parallel, buffer, distributed & accumulation) = {:,}".format( total_train_batch_size ) ) logger.info(" Gradient Accumulation steps = {:,}".format(self.args.gradient_accumulation_steps)) logger.info(" Num optimization epochs per batch = {:,}".format(self.finetuning_args.ppo_epochs)) logger.info(" Total training steps = {:,}".format(max_steps)) logger.info(" Number of trainable parameters = {:,}".format(count_parameters(self.model)[0])) dataiter = iter(self.dataloader) loss_meter = AverageMeter() reward_meter = AverageMeter() self.log_callback.on_train_begin(self.args, self.state, self.control) for step in tqdm(range(max_steps), disable=not self.is_local_process_zero()): try: batch = next(dataiter) except StopIteration: dataiter = iter(self.dataloader) batch = next(dataiter) # Get inputs self.model.eval() self.tokenizer.padding_side = "right" # change padding side queries, responses, rewards = [], [], [] for idx in range(0, self.config.batch_size, self.config.mini_batch_size): mini_batch_queries, mini_batch_responses = self.get_inputs( batch[idx : idx + self.config.mini_batch_size] ) mini_batch_rewards = self.get_rewards(mini_batch_queries, mini_batch_responses) queries.extend(mini_batch_queries) responses.extend(mini_batch_responses) rewards.extend(mini_batch_rewards) # Run PPO step self.model.train() stats = self.step(queries, responses, rewards) self.tokenizer.padding_side = "left" # restore padding side loss_meter.update(float(stats["ppo/loss/total"]), n=len(rewards)) reward_meter.update(torch.stack(rewards).mean().item(), n=len(rewards)) if self.config.log_with is not None: try: batch["query"] = self.tokenizer.batch_decode(queries, skip_special_tokens=True) batch["response"] = self.tokenizer.batch_decode(responses, skip_special_tokens=True) self.log_stats(stats, batch, rewards) except Exception: logger.warning("Failed to save stats due to unknown errors.") self.state.global_step += 1 self.log_callback.on_step_end(self.args, self.state, self.control) if self.is_local_process_zero() and (step + 1) % self.args.logging_steps == 0: logs = dict( loss=round(loss_meter.avg, 4), reward=round(reward_meter.avg, 4), learning_rate=stats["ppo/learning_rate"], epoch=round(step / steps_in_epoch, 2), ) tqdm.write(str(logs)) logs["step"] = step self.state.log_history.append(logs) self.log_callback.on_log(self.args, self.state, self.control) loss_meter.reset() reward_meter.reset() if (step + 1) % self.args.save_steps == 0: # save checkpoint self.save_model( os.path.join(self.args.output_dir, "{}-{}".format(PREFIX_CHECKPOINT_DIR, self.state.global_step)) ) self.save_callback.on_save( self.args, self.state, self.control, model=self.accelerator.unwrap_model(self.model) ) if self.control.should_epoch_stop or self.control.should_training_stop: break self.log_callback.on_train_end(self.args, self.state, self.control) self.save_callback.on_train_end( self.args, self.state, self.control, model=self.accelerator.unwrap_model(self.model) ) def create_optimizer( self, model: "AutoModelForCausalLMWithValueHead", training_args: "Seq2SeqTrainingArguments", finetuning_args: "FinetuningArguments", ) -> "torch.optim.Optimizer": optimizer = create_custom_optimzer(model, training_args, finetuning_args) if optimizer is None: decay_params, nodecay_params = [], [] decay_param_names = self.get_decay_parameter_names(model) for name, param in model.named_parameters(): if param.requires_grad: if name in decay_param_names: decay_params.append(param) else: nodecay_params.append(param) optim_class, optim_kwargs = 
Trainer.get_optimizer_cls_and_kwargs(training_args) param_groups = [ dict(params=nodecay_params), dict(params=decay_params, weight_decay=training_args.weight_decay), ] optimizer = optim_class(param_groups, **optim_kwargs) return optimizer def create_scheduler( self, training_args: "Seq2SeqTrainingArguments", num_training_steps: int, optimizer: "torch.optim.Optimizer" ) -> "torch.optim.lr_scheduler.LRScheduler": create_custom_scheduler(training_args, num_training_steps, optimizer) lr_scheduler = get_scheduler( training_args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=training_args.get_warmup_steps(num_training_steps), num_training_steps=num_training_steps, ) return lr_scheduler @torch.no_grad() def get_inputs(self, batch: Dict[str, "torch.Tensor"]) -> Tuple[List["torch.Tensor"], List["torch.Tensor"]]: r""" Generates model's responses given queries. """ if batch["input_ids"].size(0) == 1: # handle llama2 ppo with gradient accumulation > 1 start_index = (batch["input_ids"][0] != self.tokenizer.pad_token_id).nonzero()[0].item() for k, v in batch.items(): batch[k] = v[:, start_index:] with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model: unwrapped_model = self.accelerator.unwrap_model(self.model) # issue in trl v0.8.6 if self.model_args.upcast_layernorm: layernorm_params = dump_layernorm(unwrapped_model) generate_output: torch.Tensor = unwrapped_model.generate( generation_config=self.generation_config, logits_processor=get_logits_processor(), **batch ) if self.model_args.upcast_layernorm: restore_layernorm(unwrapped_model, layernorm_params) query = batch["input_ids"].detach().cpu() response = generate_output[:, batch["input_ids"].size(-1) :].detach().cpu() queries, responses = [], [] for i in range(len(query)): query_start_index = (query[i] != self.tokenizer.pad_token_id).nonzero()[0].item() response_index = (response[i] != self.tokenizer.pad_token_id).nonzero() if len(response_index) == 0: response_length = 1 # allow empty response else: response_length = response_index[-1].item() + 1 queries.append(query[i, query_start_index:]) # remove padding from left responses.append(response[i, :response_length]) # remove padding from right return queries, responses @torch.no_grad() def get_rewards( self, queries: List["torch.Tensor"], responses: List["torch.Tensor"], ) -> List["torch.Tensor"]: r""" Computes scores using given reward model. Both inputs and outputs are put on CPU. 
""" if self.finetuning_args.reward_model_type == "api": token_ids = [torch.cat((q, r), dim=-1).tolist() for q, r in zip(queries, responses)] messages = self.tokenizer.batch_decode(token_ids, skip_special_tokens=True) return get_rewards_from_server(self.reward_model, messages) batch = self.prepare_model_inputs(queries, responses) unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model) if self.finetuning_args.reward_model_type == "lora": replace_model(unwrapped_model, target="reward") reward_model = self.model else: reward_model = self.reward_model with unwrap_model_for_generation(reward_model, self.accelerator), self.amp_context: # support bf16 _, _, values = reward_model(**batch, output_hidden_states=True, return_dict=True, use_cache=False) if self.finetuning_args.reward_model_type == "lora": replace_model(unwrapped_model, target="default") if self.is_chatglm_model: # assume same architecture values = torch.transpose(values, 0, 1) rewards = [] for i in range(values.size(0)): end_indexes = (batch["input_ids"][i] != self.tokenizer.pad_token_id).nonzero() end_index = end_indexes[-1].item() if len(end_indexes) else 0 rewards.append(values[i, end_index].float().detach().cpu()) # use fp32 type return rewards @PPODecorators.empty_device_cache() def batched_forward_pass( self, model: "AutoModelForCausalLMWithValueHead", queries: "torch.Tensor", responses: "torch.Tensor", model_inputs: Dict[str, Any], return_logits: bool = False, response_masks: Optional["torch.Tensor"] = None, ) -> Tuple["torch.Tensor", Optional["torch.Tensor"], "torch.Tensor", "torch.Tensor"]: r""" Calculates model outputs in multiple batches. Subclass and override to inject custom behavior. """ bs = len(queries) fbs = self.config.mini_batch_size all_logprobs = [] all_logits = [] all_masks = [] all_values = [] for i in range(math.ceil(bs / fbs)): input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()} query_batch = queries[i * fbs : (i + 1) * fbs] response_batch = responses[i * fbs : (i + 1) * fbs] if response_masks is not None: response_masks_batch = response_masks[i * fbs : (i + 1) * fbs] input_ids = input_kwargs["input_ids"] attention_mask = input_kwargs["attention_mask"] with self.amp_context: # support bf16 logits, _, values = model(**input_kwargs) if self.is_chatglm_model: values = torch.transpose(values, 0, 1) logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:]) masks = torch.zeros_like(attention_mask) masks[:, :-1] = attention_mask[:, 1:] for j in range(len(query_batch)): start = len(query_batch[j]) - 1 if attention_mask[j, 0] == 0: # offset left padding start += attention_mask[j, :].nonzero()[0].item() end = start + len(response_batch[j]) if response_masks is not None: response_masks_batch = torch.cat((torch.zeros_like(query_batch[j]), response_masks_batch[j]))[1:] masks[j, :start] = 0 masks[j, end:] = 0 if response_masks is not None: masks[j, start:end] = masks[j, start:end] * response_masks_batch[j][start:end] if return_logits: all_logits.append(logits) else: del logits all_values.append(values) all_logprobs.append(logprobs) all_masks.append(masks) return ( torch.cat(all_logprobs), torch.cat(all_logits)[:, :-1] if return_logits else None, torch.cat(all_values)[:, :-1], torch.cat(all_masks)[:, :-1], ) def save_model(self, output_dir: Optional[str] = None) -> None: r""" Saves model checkpoint. Subclass and override to inject custom behavior. 
""" if output_dir is None: output_dir = self.args.output_dir if self.is_fsdp_enabled or self.is_deepspeed_enabled: try: state_dict = self.accelerator.get_state_dict(self.model) # must be called at all ranks if self.args.should_save: self._save(output_dir, state_dict=state_dict) except ValueError: logger.warning( " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead," " use zero_to_fp32.py to recover weights" ) if self.args.should_save: self._save(output_dir, state_dict={}) # remove the dummy state_dict remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME]) self.model.save_checkpoint(output_dir) elif self.args.should_save: self._save(output_dir) if self.processor is not None and self.args.should_save: output_dir = output_dir if output_dir is not None else self.args.output_dir getattr(self.processor, "image_processor").save_pretrained(output_dir)
LLaMA-Factory/src/llamafactory/train/ppo/trainer.py/0
{ "file_path": "LLaMA-Factory/src/llamafactory/train/ppo/trainer.py", "repo_id": "LLaMA-Factory", "token_count": 10459 }
10
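A note on the reward extraction above: `PPOTrainer.get_rewards` scores each query–response pair by reading the value head's output at the last non-padding token of the concatenated sequence. The sketch below isolates just that selection step; the helper name `last_token_values` is invented for illustration, and the real method additionally handles the LoRA reward-model swap, the API reward server, and the ChatGLM transpose.

```python
from typing import List

import torch


def last_token_values(values: torch.Tensor, input_ids: torch.Tensor, pad_token_id: int) -> List[torch.Tensor]:
    """Pick the value-head output at the last non-pad token of each sequence.

    Assumes `values` and `input_ids` both have shape (batch, seq_len),
    mirroring the loop inside `PPOTrainer.get_rewards`.
    """
    rewards = []
    for i in range(values.size(0)):
        non_pad = (input_ids[i] != pad_token_id).nonzero()
        end_index = non_pad[-1].item() if len(non_pad) else 0
        rewards.append(values[i, end_index].float().detach().cpu())  # keep fp32 scores on CPU
    return rewards
```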
# Copyright 2024 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Dict, Tuple from ...data import Role from ...extras.packages import is_gradio_available from ..utils import check_json_schema if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component from ..engine import Engine def create_chat_box( engine: "Engine", visible: bool = False ) -> Tuple["Component", "Component", Dict[str, "Component"]]: with gr.Column(visible=visible) as chat_box: chatbot = gr.Chatbot(show_copy_button=True) messages = gr.State([]) with gr.Row(): with gr.Column(scale=4): with gr.Row(): with gr.Column(): role = gr.Dropdown(choices=[Role.USER.value, Role.OBSERVATION.value], value=Role.USER.value) system = gr.Textbox(show_label=False) tools = gr.Textbox(show_label=False, lines=3) with gr.Column() as image_box: image = gr.Image(sources=["upload"], type="numpy") query = gr.Textbox(show_label=False, lines=8) submit_btn = gr.Button(variant="primary") with gr.Column(scale=1): max_new_tokens = gr.Slider(minimum=8, maximum=4096, value=512, step=1) top_p = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.01) temperature = gr.Slider(minimum=0.01, maximum=1.5, value=0.95, step=0.01) clear_btn = gr.Button() tools.input(check_json_schema, inputs=[tools, engine.manager.get_elem_by_id("top.lang")]) submit_btn.click( engine.chatter.append, [chatbot, messages, role, query], [chatbot, messages, query], ).then( engine.chatter.stream, [chatbot, messages, system, tools, image, max_new_tokens, top_p, temperature], [chatbot, messages], ) clear_btn.click(lambda: ([], []), outputs=[chatbot, messages]) return ( chatbot, messages, dict( chat_box=chat_box, role=role, system=system, tools=tools, image_box=image_box, image=image, query=query, submit_btn=submit_btn, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature, clear_btn=clear_btn, ), )
LLaMA-Factory/src/llamafactory/webui/components/chatbot.py/0
{ "file_path": "LLaMA-Factory/src/llamafactory/webui/components/chatbot.py", "repo_id": "LLaMA-Factory", "token_count": 1353 }
11
# Copyright 2024 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from llamafactory.data.formatter import EmptyFormatter, FunctionFormatter, StringFormatter, ToolFormatter def test_empty_formatter(): formatter = EmptyFormatter(slots=["\n"]) assert formatter.apply() == ["\n"] def test_string_formatter(): formatter = StringFormatter(slots=["<s>", "Human: {{content}}\nAssistant:"]) assert formatter.apply(content="Hi") == ["<s>", "Human: Hi\nAssistant:"] def test_function_formatter(): formatter = FunctionFormatter(slots=["Action: {{name}}\nAction Input: {{arguments}}\n"]) tool_calls = json.dumps({"name": "tool_name", "arguments": {"foo": "bar", "size": 10}}) assert formatter.apply(content=tool_calls) == [ """Action: tool_name\nAction Input: {\"foo\": \"bar\", \"size\": 10}\n""" ] def test_multi_function_formatter(): formatter = FunctionFormatter(slots=["Action: {{name}}\nAction Input: {{arguments}}\n"]) tool_calls = json.dumps([{"name": "tool_name", "arguments": {"foo": "bar", "size": 10}}] * 2) assert formatter.apply(content=tool_calls) == [ """Action: tool_name\nAction Input: {\"foo\": \"bar\", \"size\": 10}\n""", """Action: tool_name\nAction Input: {\"foo\": \"bar\", \"size\": 10}\n""", ] def test_default_tool_formatter(): formatter = ToolFormatter(tool_format="default") tools = [ { "name": "test_tool", "description": "tool_desc", "parameters": { "type": "object", "properties": { "foo": {"type": "string", "description": "foo_desc"}, "bar": {"type": "number", "description": "bar_desc"}, }, "required": ["foo"], }, } ] assert formatter.apply(content=json.dumps(tools)) == [ "You have access to the following tools:\n" "> Tool Name: test_tool\n" "Tool Description: tool_desc\n" "Tool Args:\n" " - foo (string, required): foo_desc\n" " - bar (number): bar_desc\n\n" "Use the following format if using a tool:\n" "```\n" "Action: tool name (one of [test_tool]).\n" "Action Input: the input to the tool, in a JSON format representing the kwargs " """(e.g. 
```{"input": "hello world", "num_beams": 5}```).\n""" "```\n" ] def test_default_tool_extractor(): formatter = ToolFormatter(tool_format="default") result = """Action: test_tool\nAction Input: {"foo": "bar", "size": 10}\n""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] def test_default_multi_tool_extractor(): formatter = ToolFormatter(tool_format="default") result = ( """Action: test_tool\nAction Input: {"foo": "bar", "size": 10}\n""" """Action: another_tool\nAction Input: {"foo": "job", "size": 2}\n""" ) assert formatter.extract(result) == [ ("test_tool", """{"foo": "bar", "size": 10}"""), ("another_tool", """{"foo": "job", "size": 2}"""), ] def test_glm4_tool_formatter(): formatter = ToolFormatter(tool_format="glm4") tools = [ { "name": "test_tool", "description": "tool_desc", "parameters": { "type": "object", "properties": { "foo": {"type": "string", "description": "foo_desc"}, "bar": {"type": "number", "description": "bar_desc"}, }, "required": ["foo"], }, } ] assert formatter.apply(content=json.dumps(tools)) == [ "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的," "你的任务是针对用户的问题和要求提供适当的答复和支持。# 可用工具\n\n" "## test_tool\n\n{}\n在调用上述函数时,请使用 Json 格式表示调用的参数。".format(json.dumps(tools[0], indent=4)) ] def test_glm4_tool_extractor(): formatter = ToolFormatter(tool_format="glm4") result = """test_tool\n{"foo": "bar", "size": 10}\n""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
LLaMA-Factory/tests/data/test_formatter.py/0
{ "file_path": "LLaMA-Factory/tests/data/test_formatter.py", "repo_id": "LLaMA-Factory", "token_count": 2113 }
12
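The extractor tests above pin down the default tool-call format: one `Action:` / `Action Input:` pair per call, possibly repeated. Below is a hypothetical regex-based re-implementation that satisfies exactly those test cases; it is a sketch for illustration, not the actual `ToolFormatter.extract` from `llamafactory.data.formatter`.

```python
import re
from typing import List, Tuple

# One capture group for the tool name, one (lazy, DOTALL) for the JSON arguments,
# terminated by the next "Action:" line or the end of the string.
_TOOL_CALL_RE = re.compile(r"Action:\s*([^\n]+)\nAction Input:\s*(.+?)(?=\nAction:|\Z)", re.DOTALL)


def extract_tool_calls(text: str) -> List[Tuple[str, str]]:
    """Parse (tool_name, json_arguments) pairs from the default tool-call format."""
    return [(name.strip(), args.strip()) for name, args in _TOOL_CALL_RE.findall(text)]
```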
<?xml version="1.0" encoding="UTF-8"?> <project version="4"> <component name="AutoImportSettings"> <option name="autoReloadType" value="SELECTIVE" /> </component> <component name="ChangeListManager"> <list default="true" id="4330c669-8ee3-402a-ad3d-93949ea0d4a7" name="Changes" comment="集成logger&amp;测试batch size对模型gpu使用率的影响&amp;重用py端的聚类算法实现对trace_id的reid分配&amp;http路由重构,简易支持了path路由&amp;&#10;增加聚类触发的http接口"> <change beforePath="$PROJECT_DIR$/deploy/pipeline/database_model.py" beforeDir="false" afterPath="$PROJECT_DIR$/deploy/pipeline/database_model.py" afterDir="false" /> <change beforePath="$PROJECT_DIR$/deploy/pipeline/pipeline.py" beforeDir="false" afterPath="$PROJECT_DIR$/deploy/pipeline/pipeline.py" afterDir="false" /> <change beforePath="$PROJECT_DIR$/deploy/pipeline/pphuman/mtmct.py" beforeDir="false" afterPath="$PROJECT_DIR$/deploy/pipeline/pphuman/mtmct.py" afterDir="false" /> <change beforePath="$PROJECT_DIR$/deploy/pptracking/python/mot/utils.py" beforeDir="false" afterPath="$PROJECT_DIR$/deploy/pptracking/python/mot/utils.py" afterDir="false" /> </list> <option name="SHOW_DIALOG" value="false" /> <option name="HIGHLIGHT_CONFLICTS" value="true" /> <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" /> <option name="LAST_RESOLUTION" value="IGNORE" /> </component> <component name="FileTemplateManagerImpl"> <option name="RECENT_TEMPLATES"> <list> <option value="Python Script" /> </list> </option> </component> <component name="FlaskConsoleOptions" custom-start-script="import sys&#10;sys.path.extend([WORKING_DIR_AND_PYTHON_PATHS])&#10;from flask.cli import ScriptInfo&#10;locals().update(ScriptInfo(create_app=None).load_app().make_shell_context())&#10;print(&quot;Python %s on %s\nApp: %s [%s]\nInstance: %s&quot; % (sys.version, sys.platform, app.import_name, app.env, app.instance_path))"> <envs> <env key="FLASK_APP" value="app" /> </envs> <option name="myCustomStartScript" value="import sys&#10;sys.path.extend([WORKING_DIR_AND_PYTHON_PATHS])&#10;from flask.cli import ScriptInfo&#10;locals().update(ScriptInfo(create_app=None).load_app().make_shell_context())&#10;print(&quot;Python %s on %s\nApp: %s [%s]\nInstance: %s&quot; % (sys.version, sys.platform, app.import_name, app.env, app.instance_path))" /> <option name="myEnvs"> <map> <entry key="FLASK_APP" value="app" /> </map> </option> </component> <component name="Git.Settings"> <option name="RECENT_BRANCH_BY_REPOSITORY"> <map> <entry key="$PROJECT_DIR$" value="release/2.7-self" /> </map> </option> <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" /> <option name="RESET_MODE" value="HARD" /> </component> <component name="HighlightingSettingsPerFile"> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/python_stubs/-2066100795/paddle/base/libpaddle/PaddleInferPredictor.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/python_stubs/-2066100795/paddle/base/libpaddle/PaddleInferTensor.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/-258238062/1015593779/sqlalchemy/orm/decl_api.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/-258238062/1015593779/sqlalchemy/sql/_selectable_constructors.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/-258238062/1015593779/sqlalchemy/sql/selectable.py" root0="SKIP_INSPECTION" /> <setting 
file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/-258238062/1952936618/http/server.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/-258238062/1952936618/socketserver.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/-258238062/1952936618/typing.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/952692696/1015593779/PIL/Image.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/952692696/1015593779/PIL/JpegImagePlugin.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/952692696/1015593779/sqlalchemy/engine/create.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/952692696/1952936618/argparse.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/952692696/1952936618/base64.py" root0="SKIP_INSPECTION" /> <setting file="file://$USER_HOME$/.cache/JetBrains/PyCharm2024.1/remote_sources/952692696/1952936618/json/__init__.py" root0="SKIP_INSPECTION" /> <setting file="file://$APPLICATION_HOME_DIR$/plugins/python/helpers/typeshed/stdlib/typing.pyi" root0="SKIP_INSPECTION" /> <setting file="file:///usr/lib/python3.10/http/server.py" root0="SKIP_INSPECTION" /> </component> <component name="ProblemsViewState"> <option name="selectedTabId" value="ProjectErrors" /> </component> <component name="ProjectColorInfo">{ &quot;associatedIndex&quot;: 4 }</component> <component name="ProjectId" id="2fAuhSSG8pohUkiwq6rtnZEwyEz" /> <component name="ProjectViewState"> <option name="hideEmptyMiddlePackages" value="true" /> <option name="showLibraryContents" value="true" /> </component> <component name="PropertiesComponent">{ &quot;keyToString&quot;: { &quot;Docker.docker-compose.yml.nvidia-dev: Compose 部署.executor&quot;: &quot;Run&quot;, &quot;Python.image_to_vector_service.executor&quot;: &quot;Debug&quot;, &quot;Python.pipeline-all.executor&quot;: &quot;Run&quot;, &quot;Python.pipeline-img.executor&quot;: &quot;Debug&quot;, &quot;Python.pipeline-rtsp.executor&quot;: &quot;Debug&quot;, &quot;Python.pipeline.executor&quot;: &quot;Debug&quot;, &quot;Python.test_recognition.executor&quot;: &quot;Debug&quot;, &quot;RunOnceActivity.ShowReadmeOnStart&quot;: &quot;true&quot;, &quot;git-widget-placeholder&quot;: &quot;sub-count&quot;, &quot;last_opened_file_path&quot;: &quot;/home/ubuntu/wensimin-work/PaddleDetection/docker-compose-http.yml&quot;, &quot;node.js.detected.package.eslint&quot;: &quot;true&quot;, &quot;node.js.detected.package.tslint&quot;: &quot;true&quot;, &quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;, &quot;node.js.selected.package.tslint&quot;: &quot;(autodetect)&quot;, &quot;nodejs_package_manager_path&quot;: &quot;npm&quot;, &quot;vue.rearranger.settings.migration&quot;: &quot;true&quot; } }</component> <component name="PyConsoleOptionsProvider"> <option name="myPythonConsoleState"> <console-settings module-name="PaddleDetection" is-module-sdk="true"> <option name="myUseModuleSdk" value="true" /> <option name="myModuleName" value="PaddleDetection" /> </console-settings> </option> </component> <component name="RdControllerToolWindowsLayoutState" isNewUi="true"> <layout> <window_info id="Bookmarks" show_stripe_button="false" side_tool="true" /> 
<window_info id="Merge Requests" show_stripe_button="false" /> <window_info id="Commit_Guest" show_stripe_button="false" /> <window_info id="Pull Requests" show_stripe_button="false" /> <window_info id="Learn" show_stripe_button="false" /> <window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.19710402" /> <window_info id="Commit" order="1" weight="0.19710402" /> <window_info id="Structure" order="2" side_tool="true" weight="0.25" /> <window_info anchor="bottom" id="Database Changes" show_stripe_button="false" /> <window_info anchor="bottom" id="TypeScript" show_stripe_button="false" /> <window_info anchor="bottom" id="File Transfer" show_stripe_button="false" /> <window_info anchor="bottom" id="Find" /> <window_info anchor="bottom" id="Version Control" order="0" weight="0.32987142" /> <window_info anchor="bottom" id="Problems" order="1" /> <window_info anchor="bottom" id="Problems View" order="2" weight="0.40358528" /> <window_info anchor="bottom" id="Terminal" order="3" weight="0.4417808" /> <window_info anchor="bottom" id="Services" order="4" weight="0.3302257" /> <window_info anchor="bottom" id="Debug" order="5" visible="true" /> <window_info anchor="bottom" id="Python Packages" order="6" weight="0.5078508" /> <window_info anchor="bottom" id="Python Console" order="7" weight="0.39892054" /> <window_info anchor="bottom" id="并发活动图" order="8" weight="0.37340528" /> <window_info anchor="bottom" id="TODO" order="9" weight="0.32987142" /> <window_info active="true" anchor="bottom" id="Run" order="10" visible="true" weight="0.318002" /> <window_info anchor="right" id="Endpoints" show_stripe_button="false" /> <window_info anchor="right" id="Coverage" show_stripe_button="false" side_tool="true" /> <window_info anchor="right" content_ui="combo" id="Notifications" order="0" weight="0.25" /> <window_info anchor="right" id="AIAssistant" order="1" weight="0.25" /> <window_info anchor="right" id="Database" order="2" weight="0.25" /> <window_info anchor="right" id="Gradle" order="3" weight="0.25" /> <window_info anchor="right" id="Maven" order="4" weight="0.25" /> <window_info anchor="right" id="Plots" order="5" weight="0.1" /> <window_info active="true" anchor="right" id="Continue" order="6" visible="true" weight="0.33008274" /> <window_info anchor="right" id="Translation.Wordbook" order="7" show_stripe_button="false" side_tool="true" /> <window_info anchor="right" id="SciView" order="8" weight="0.7372931" /> </layout> </component> <component name="RecentsManager"> <key name="CopyFile.RECENT_KEYS"> <recent name="$PROJECT_DIR$" /> <recent name="$PROJECT_DIR$/deploy/pipeline" /> </key> <key name="MoveFile.RECENT_KEYS"> <recent name="$PROJECT_DIR$/deploy/pipeline/config" /> <recent name="$PROJECT_DIR$/input" /> </key> </component> <component name="RunManager" selected="Python.pipeline-rtsp"> <configuration name="image_to_vector_service" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> <module name="PaddleDetection" /> <option name="ENV_FILES" value="" /> <option name="INTERPRETER_OPTIONS" value="" /> <option name="PARENT_ENVS" value="true" /> <envs> <env name="PYTHONUNBUFFERED" value="1" /> </envs> <option name="SDK_HOME" value="" /> <option name="SDK_NAME" value="Remote Python 3.10.13 Docker Compose (nvidia-dev) (2)" /> <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/" /> <option name="IS_MODULE_SDK" value="false" /> <option name="ADD_CONTENT_ROOTS" value="true" /> <option name="ADD_SOURCE_ROOTS" value="true" /> 
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> <option name="SCRIPT_NAME" value="$PROJECT_DIR$/deploy/pipeline/image_to_vector_service.py" /> <option name="PARAMETERS" value="--config deploy/pipeline/config/infer_cfg_pphuman.yml --device=GPU" /> <option name="SHOW_COMMAND_LINE" value="false" /> <option name="EMULATE_TERMINAL" value="false" /> <option name="MODULE_MODE" value="false" /> <option name="REDIRECT_INPUT" value="false" /> <option name="INPUT_FILE" value="" /> <method v="2" /> </configuration> <configuration name="pipeline-all" type="PythonConfigurationType" factoryName="Python"> <module name="PaddleDetection" /> <option name="ENV_FILES" value="" /> <option name="INTERPRETER_OPTIONS" value="" /> <option name="PARENT_ENVS" value="true" /> <envs> <env name="PYTHONUNBUFFERED" value="1" /> </envs> <option name="SDK_HOME" value="" /> <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/" /> <option name="IS_MODULE_SDK" value="false" /> <option name="ADD_CONTENT_ROOTS" value="true" /> <option name="ADD_SOURCE_ROOTS" value="true" /> <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> <option name="SCRIPT_NAME" value="$PROJECT_DIR$/deploy/pipeline/pipeline.py" /> <option name="PARAMETERS" value="--config deploy/pipeline/config/infer_cfg_pphuman.yml --video_dir=input --device=gpu" /> <option name="SHOW_COMMAND_LINE" value="false" /> <option name="EMULATE_TERMINAL" value="false" /> <option name="MODULE_MODE" value="false" /> <option name="REDIRECT_INPUT" value="false" /> <option name="INPUT_FILE" value="" /> <method v="2" /> </configuration> <configuration name="pipeline-img" type="PythonConfigurationType" factoryName="Python"> <module name="PaddleDetection" /> <option name="ENV_FILES" value="" /> <option name="INTERPRETER_OPTIONS" value="" /> <option name="PARENT_ENVS" value="true" /> <envs> <env name="PYTHONUNBUFFERED" value="1" /> </envs> <option name="SDK_HOME" value="" /> <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/" /> <option name="IS_MODULE_SDK" value="true" /> <option name="ADD_CONTENT_ROOTS" value="true" /> <option name="ADD_SOURCE_ROOTS" value="true" /> <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> <option name="SCRIPT_NAME" value="$PROJECT_DIR$/deploy/pipeline/pipeline.py" /> <option name="PARAMETERS" value="--config deploy/pipeline/config/infer_cfg_pphuman.yml --image_dir=input-img --device=gpu" /> <option name="SHOW_COMMAND_LINE" value="false" /> <option name="EMULATE_TERMINAL" value="false" /> <option name="MODULE_MODE" value="false" /> <option name="REDIRECT_INPUT" value="false" /> <option name="INPUT_FILE" value="" /> <method v="2" /> </configuration> <configuration name="pipeline-rtsp" type="PythonConfigurationType" factoryName="Python"> <module name="PaddleDetection" /> <option name="ENV_FILES" value="" /> <option name="INTERPRETER_OPTIONS" value="" /> <option name="PARENT_ENVS" value="true" /> <envs> <env name="PYTHONUNBUFFERED" value="1" /> </envs> <option name="SDK_HOME" value="" /> <option name="SDK_NAME" value="Remote Python 3.10.13 Docker Compose (nvidia-dev)" /> <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/" /> <option name="IS_MODULE_SDK" value="false" /> <option name="ADD_CONTENT_ROOTS" value="true" /> <option name="ADD_SOURCE_ROOTS" value="true" /> <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> <option name="SCRIPT_NAME" value="$PROJECT_DIR$/deploy/pipeline/pipeline.py" /> <option name="PARAMETERS" value="--config 
deploy/pipeline/config/infer_cfg_pphuman.yml --rtsp=rtsp://admin:nudt@2024@127.0.0.1:15541/Streaming/Channels/101 --device=GPU --do_entrance_counting" /> <option name="SHOW_COMMAND_LINE" value="false" /> <option name="EMULATE_TERMINAL" value="false" /> <option name="MODULE_MODE" value="false" /> <option name="REDIRECT_INPUT" value="false" /> <option name="INPUT_FILE" value="" /> <method v="2" /> </configuration> <configuration name="pipeline" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> <module name="PaddleDetection" /> <option name="ENV_FILES" value="" /> <option name="INTERPRETER_OPTIONS" value="" /> <option name="PARENT_ENVS" value="true" /> <envs> <env name="PYTHONUNBUFFERED" value="1" /> </envs> <option name="SDK_HOME" value="" /> <option name="SDK_NAME" value="Remote Python 3.10.13 Docker Compose (nvidia-dev)" /> <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/" /> <option name="IS_MODULE_SDK" value="false" /> <option name="ADD_CONTENT_ROOTS" value="true" /> <option name="ADD_SOURCE_ROOTS" value="true" /> <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> <option name="SCRIPT_NAME" value="$PROJECT_DIR$/deploy/pipeline/pipeline.py" /> <option name="PARAMETERS" value="--config deploy/pipeline/config/infer_cfg_pphuman.yml --video_dir=input-simple --device=gpu --do_entrance_counting" /> <option name="SHOW_COMMAND_LINE" value="false" /> <option name="EMULATE_TERMINAL" value="false" /> <option name="MODULE_MODE" value="false" /> <option name="REDIRECT_INPUT" value="false" /> <option name="INPUT_FILE" value="" /> <method v="2" /> </configuration> <configuration name="test_recognition" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> <module name="PaddleDetection" /> <option name="ENV_FILES" value="" /> <option name="INTERPRETER_OPTIONS" value="" /> <option name="PARENT_ENVS" value="true" /> <envs> <env name="PYTHONUNBUFFERED" value="1" /> </envs> <option name="SDK_HOME" value="" /> <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/deploy/pipeline" /> <option name="IS_MODULE_SDK" value="true" /> <option name="ADD_CONTENT_ROOTS" value="true" /> <option name="ADD_SOURCE_ROOTS" value="true" /> <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> <option name="SCRIPT_NAME" value="$PROJECT_DIR$/deploy/pipeline/test_recognition.py" /> <option name="PARAMETERS" value="" /> <option name="SHOW_COMMAND_LINE" value="false" /> <option name="EMULATE_TERMINAL" value="false" /> <option name="MODULE_MODE" value="false" /> <option name="REDIRECT_INPUT" value="false" /> <option name="INPUT_FILE" value="" /> <method v="2" /> </configuration> <configuration default="true" type="docker-deploy" factoryName="docker-compose.yml" temporary="true"> <deployment type="docker-compose.yml"> <settings /> </deployment> <method v="2" /> </configuration> <configuration name="docker-compose.yml.nvidia-dev: Compose 部署" type="docker-deploy" factoryName="docker-compose.yml" temporary="true" server-name="Docker"> <deployment type="docker-compose.yml"> <settings> <option name="services"> <list> <option value="nvidia-dev" /> </list> </option> <option name="sourceFilePath" value="docker-compose.yml" /> </settings> </deployment> <method v="2" /> </configuration> <list> <item itemvalue="Docker.docker-compose.yml.nvidia-dev: Compose 部署" /> <item itemvalue="Python.pipeline-img" /> <item itemvalue="Python.pipeline-all" /> <item itemvalue="Python.pipeline-rtsp" /> <item 
itemvalue="Python.image_to_vector_service" /> <item itemvalue="Python.test_recognition" /> <item itemvalue="Python.pipeline" /> </list> <recent_temporary> <list> <item itemvalue="Python.image_to_vector_service" /> <item itemvalue="Python.pipeline" /> <item itemvalue="Python.test_recognition" /> <item itemvalue="Docker.docker-compose.yml.nvidia-dev: Compose 部署" /> <item itemvalue="Python.pipeline" /> </list> </recent_temporary> </component> <component name="SharedIndexes"> <attachedChunks> <set> <option value="bundled-js-predefined-1d06a55b98c1-74d2a5396914-JavaScript-PY-241.14494.241" /> <option value="bundled-python-sdk-0509580d9d50-28c9f5db9ffe-com.jetbrains.pycharm.pro.sharedIndexes.bundled-PY-241.14494.241" /> </set> </attachedChunks> </component> <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" /> <component name="TaskManager"> <task active="true" id="Default" summary="Default task"> <changelist id="4330c669-8ee3-402a-ad3d-93949ea0d4a7" name="Changes" comment="" /> <created>1713256325317</created> <option name="number" value="Default" /> <option name="presentableId" value="Default" /> <updated>1713256325317</updated> <workItem from="1713256326653" duration="2524000" /> <workItem from="1713259733809" duration="4368000" /> <workItem from="1713331476577" duration="12858000" /> <workItem from="1713344910793" duration="79376000" /> <workItem from="1714010166891" duration="1764000" /> <workItem from="1714013555583" duration="5326000" /> <workItem from="1714033547383" duration="37437000" /> <workItem from="1715063635926" duration="350000" /> <workItem from="1715064266973" duration="64446000" /> <workItem from="1715397963038" duration="219000" /> <workItem from="1715398201693" duration="19701000" /> <workItem from="1715566208726" duration="43724000" /> <workItem from="1715752515820" duration="27733000" /> <workItem from="1715844562996" duration="57346000" /> <workItem from="1716512757504" duration="8345000" /> <workItem from="1716538231645" duration="3903000" /> <workItem from="1716771638524" duration="49647000" /> <workItem from="1717466782112" duration="195000" /> <workItem from="1717467064467" duration="9086000" /> <workItem from="1717654176718" duration="71000" /> <workItem from="1717654974416" duration="213000" /> <workItem from="1717655257426" duration="245000" /> <workItem from="1717655563149" duration="11025000" /> </task> <task id="LOCAL-00001" summary="配置远程运行环境&amp;修改输出json文件&amp;修正队列使用方式fix大压力下无结果问题"> <option name="closed" value="true" /> <created>1713926375729</created> <option name="number" value="00001" /> <option name="presentableId" value="LOCAL-00001" /> <option name="project" value="LOCAL" /> <updated>1713926375729</updated> </task> <task id="LOCAL-00002" summary="changing"> <option name="closed" value="true" /> <created>1713946080033</created> <option name="number" value="00002" /> <option name="presentableId" value="LOCAL-00002" /> <option name="project" value="LOCAL" /> <updated>1713946080033</updated> </task> <task id="LOCAL-00003" summary="develop的dockerfile"> <option name="closed" value="true" /> <created>1713948539037</created> <option name="number" value="00003" /> <option name="presentableId" value="LOCAL-00003" /> <option name="project" value="LOCAL" /> <updated>1713948539037</updated> </task> <task id="LOCAL-00004" summary="增加脸部向量收集&amp;增加人脸检测的图片截取&amp;修复因修改数据结构破坏的归一化和分组&amp;保存track"> <option name="closed" value="true" /> 
<created>1715308590227</created> <option name="number" value="00004" /> <option name="presentableId" value="LOCAL-00004" /> <option name="project" value="LOCAL" /> <updated>1715308590227</updated> </task> <task id="LOCAL-00005" summary="增加一些用于追踪mot的实现方式log"> <option name="closed" value="true" /> <created>1715411863706</created> <option name="number" value="00005" /> <option name="presentableId" value="LOCAL-00005" /> <option name="project" value="LOCAL" /> <updated>1715411863706</updated> </task> <task id="LOCAL-00006" summary="实现实时解析摄像头信息保存到pgvector"> <option name="closed" value="true" /> <created>1715590040662</created> <option name="number" value="00006" /> <option name="presentableId" value="LOCAL-00006" /> <option name="project" value="LOCAL" /> <updated>1715590040662</updated> </task> <task id="LOCAL-00007" summary="加大队列buffer&amp;取消人脸识别"> <option name="closed" value="true" /> <created>1715675137546</created> <option name="number" value="00007" /> <option name="presentableId" value="LOCAL-00007" /> <option name="project" value="LOCAL" /> <updated>1715675137546</updated> </task> <task id="LOCAL-00008" summary="增加http服务器分析图片"> <option name="closed" value="true" /> <created>1715741062134</created> <option name="number" value="00008" /> <option name="presentableId" value="LOCAL-00008" /> <option name="project" value="LOCAL" /> <updated>1715741062134</updated> </task> <task id="LOCAL-00009" summary="修复web服务器的性能问题以及报错"> <option name="closed" value="true" /> <created>1715844608258</created> <option name="number" value="00009" /> <option name="presentableId" value="LOCAL-00009" /> <option name="project" value="LOCAL" /> <updated>1715844608258</updated> </task> <task id="LOCAL-00010" summary="增加保存出入记录"> <option name="closed" value="true" /> <created>1716192241610</created> <option name="number" value="00010" /> <option name="presentableId" value="LOCAL-00010" /> <option name="project" value="LOCAL" /> <updated>1716192241610</updated> </task> <task id="LOCAL-00011" summary="集成logger&amp;测试batch size对模型gpu使用率的影响&amp;重用py端的聚类算法实现对trace_id的reid分配&amp;http路由重构,简易支持了path路由&amp;&#10;增加聚类触发的http接口"> <option name="closed" value="true" /> <created>1716518553927</created> <option name="number" value="00011" /> <option name="presentableId" value="LOCAL-00011" /> <option name="project" value="LOCAL" /> <updated>1716518553927</updated> </task> <option name="localTasksCounter" value="12" /> <servers /> </component> <component name="TypeScriptGeneratedFilesManager"> <option name="version" value="3" /> </component> <component name="Vcs.Log.History.Properties"> <option name="COLUMN_ID_ORDER"> <list> <option value="Default.Root" /> <option value="Default.Author" /> <option value="Default.Date" /> <option value="Default.Subject" /> <option value="GitHub.CommitStatus" /> </list> </option> </component> <component name="Vcs.Log.Tabs.Properties"> <option name="OPEN_GENERIC_TABS"> <map> <entry key="8243640e-b249-4de3-8734-e2e6a4633821" value="TOOL_WINDOW" /> </map> </option> <option name="TAB_STATES"> <map> <entry key="8243640e-b249-4de3-8734-e2e6a4633821"> <value> <State> <option name="FILTERS"> <map> <entry key="branch"> <value> <list> <option value="HEAD" /> </list> </value> </entry> <entry key="roots"> <value> <list> <option value="$PROJECT_DIR$" /> </list> </value> </entry> </map> </option> <option name="SHOW_ONLY_AFFECTED_CHANGES" value="true" /> </State> </value> </entry> <entry key="MAIN"> <value> <State /> </value> </entry> </map> </option> </component> <component name="VcsManagerConfiguration"> <MESSAGE 
value="配置远程运行环境&amp;修改输出json文件&amp;修正队列使用方式fix大压力下无结果问题" /> <MESSAGE value="changing" /> <MESSAGE value="develop的dockerfile" /> <MESSAGE value="增加脸部向量收集&amp;增加人脸检测的图片截取&amp;修复因修改数据结构破坏的归一化和分组&amp;保存track" /> <MESSAGE value="增加一些用于追踪mot的实现方" /> <MESSAGE value="实现实时解析摄像头信息保存到pgvector" /> <MESSAGE value="加大队列buffer&amp;取消人脸识别" /> <MESSAGE value="增加http服务器分析图片" /> <MESSAGE value="修复web服务器的性能问题以及报错" /> <MESSAGE value="增加保存出入记录" /> <MESSAGE value="集成logger&amp;测试batch size对模型gpu使用率的影响&amp;重用py端的聚类算法实现对trace_id的reid分配&amp;http路由重构,简易支持了path路由&amp;&#10;增加聚类触发的http接口" /> <option name="LAST_COMMIT_MESSAGE" value="集成logger&amp;测试batch size对模型gpu使用率的影响&amp;重用py端的聚类算法实现对trace_id的reid分配&amp;http路由重构,简易支持了path路由&amp;&#10;增加聚类触发的http接口" /> </component> <component name="XDebuggerManager"> <breakpoint-manager> <breakpoints> <line-breakpoint enabled="true" suspend="THREAD" type="python-line"> <url>file://$PROJECT_DIR$/deploy/pipeline/test_recognition.py</url> <line>712</line> <option name="timeStamp" value="4" /> </line-breakpoint> </breakpoints> </breakpoint-manager> </component> <component name="com.intellij.coverage.CoverageDataManagerImpl"> <SUITE FILE_PATH="coverage/PaddleDetection$pipeline_all.coverage" NAME="pipeline-all 覆盖结果" MODIFIED="1714007392243" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/" /> <SUITE FILE_PATH="coverage/PaddleDetection$pipeline_img.coverage" NAME="pipeline-img 覆盖结果" MODIFIED="1713406894405" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/" /> <SUITE FILE_PATH="coverage/PaddleDetection$pipeline.coverage" NAME="pipeline 覆盖结果" MODIFIED="1715152214829" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/" /> <SUITE FILE_PATH="coverage/PaddleDetection$test_recognition.coverage" NAME="test_recognition 覆盖结果" MODIFIED="1714034491626" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/deploy/pipeline" /> <SUITE FILE_PATH="coverage/PaddleDetection$image_to_vector_service.coverage" NAME="image_to_vector_service 覆盖结果" MODIFIED="1716943185287" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/" /> <SUITE FILE_PATH="coverage/PaddleDetection$pipeline_rtsp.coverage" NAME="pipeline-rtsp 覆盖结果" MODIFIED="1716976144419" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/" /> </component> </project>
PaddleDetection/.idea/workspace.xml/0
{ "file_path": "PaddleDetection/.idea/workspace.xml", "repo_id": "PaddleDetection", "token_count": 13993 }
13
English | [简体中文](README_cn.md) # CenterNet (CenterNet: Objects as Points) ## Table of Contents - [Introduction](#Introduction) - [Model Zoo](#Model_Zoo) - [Citations](#Citations) ## Introduction [CenterNet](http://arxiv.org/abs/1904.07850) is an Anchor Free detector, which model an object as a single point -- the center point of its bounding box. The detector uses keypoint estimation to find center points and regresses to all other object properties. The center point based approach, CenterNet, is end-to-end differentiable, simpler, faster, and more accurate than corresponding bounding box based detectors. ## Model Zoo ### CenterNet Results on COCO-val 2017 | backbone | input shape | mAP | FPS | download | config | | :--------------| :------- | :----: | :------: | :----: |:-----: | | DLA-34(paper) | 512x512 | 37.4 | - | - | - | | DLA-34 | 512x512 | 37.6 | - | [model](https://bj.bcebos.com/v1/paddledet/models/centernet_dla34_140e_coco.pdparams) | [config](./centernet_dla34_140e_coco.yml) | | ResNet50 + DLAUp | 512x512 | 38.9 | - | [model](https://bj.bcebos.com/v1/paddledet/models/centernet_r50_140e_coco.pdparams) | [config](./centernet_r50_140e_coco.yml) | | MobileNetV1 + DLAUp | 512x512 | 28.2 | - | [model](https://bj.bcebos.com/v1/paddledet/models/centernet_mbv1_140e_coco.pdparams) | [config](./centernet_mbv1_140e_coco.yml) | | MobileNetV3_small + DLAUp | 512x512 | 17 | - | [model](https://bj.bcebos.com/v1/paddledet/models/centernet_mbv3_small_140e_coco.pdparams) | [config](./centernet_mbv3_small_140e_coco.yml) | | MobileNetV3_large + DLAUp | 512x512 | 27.1 | - | [model](https://bj.bcebos.com/v1/paddledet/models/centernet_mbv3_large_140e_coco.pdparams) | [config](./centernet_mbv3_large_140e_coco.yml) | | ShuffleNetV2 + DLAUp | 512x512 | 23.8 | - | [model](https://bj.bcebos.com/v1/paddledet/models/centernet_shufflenetv2_140e_coco.pdparams) | [config](./centernet_shufflenetv2_140e_coco.yml) | ## Citations ``` @article{zhou2019objects, title={Objects as points}, author={Zhou, Xingyi and Wang, Dequan and Kr{\"a}henb{\"u}hl, Philipp}, journal={arXiv preprint arXiv:1904.07850}, year={2019} } ```
PaddleDetection/configs/centernet/README.md/0
{ "file_path": "PaddleDetection/configs/centernet/README.md", "repo_id": "PaddleDetection", "token_count": 949 }
14
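The README above describes CenterNet only at a high level: objects are modeled as center points found by keypoint estimation on a class heatmap. The toy NumPy sketch below shows the peak-extraction idea: keep locations that are local maxima in a 3x3 window (a cheap stand-in for the max-pool NMS used in practice) and take the top-k as candidate centers. The real decoder additionally regresses a size and a sub-pixel offset for every peak, which is omitted here.

```python
import numpy as np


def topk_centers(heatmap: np.ndarray, k: int = 100):
    """Return up to k (class, y, x, score) peaks from a (num_classes, H, W) heatmap."""
    num_classes, h, w = heatmap.shape
    padded = np.pad(heatmap, ((0, 0), (1, 1), (1, 1)), constant_values=-np.inf)
    # a location survives only if it is >= every value in its 3x3 neighbourhood
    neighbourhood = np.stack(
        [padded[:, dy:dy + h, dx:dx + w] for dy in range(3) for dx in range(3)]
    )
    keep = heatmap >= neighbourhood.max(axis=0)
    flat = np.where(keep, heatmap, 0.0).reshape(-1)
    topk = np.argsort(flat)[::-1][:k]
    cls, pos = np.divmod(topk, h * w)
    ys, xs = np.divmod(pos, w)
    return list(zip(cls.tolist(), ys.tolist(), xs.tolist(), flat[topk].tolist()))
```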
epoch: 15 snapshot_epoch: 5 LearningRate: base_lr: 0.6e-3 schedulers: - !CosineDecay max_epochs: 15 use_warmup: False OptimizerBuilder: regularizer: False optimizer: type: AdamW
PaddleDetection/configs/clrnet/_base_/optimizer_1x.yml/0
{ "file_path": "PaddleDetection/configs/clrnet/_base_/optimizer_1x.yml", "repo_id": "PaddleDetection", "token_count": 91 }
15
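The schedule above is a plain cosine decay over 15 epochs with warmup disabled. Its shape is sketched below; note that PaddleDetection's `CosineDecay` actually steps per iteration rather than per epoch, so this is only an approximation of the curve, not the scheduler itself.

```python
import math


def cosine_lr(base_lr: float, epoch: float, max_epochs: int = 15) -> float:
    """Cosine decay from base_lr down to 0 over max_epochs, no warmup."""
    return 0.5 * base_lr * (1.0 + math.cos(math.pi * epoch / max_epochs))


# With base_lr = 6e-4: epoch 0 -> 6.0e-4, epoch 7 -> ~3.3e-4, epoch 14 -> ~6.6e-6
```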
metric: RBOX num_classes: 9 TrainDataset: !COCODataSet image_dir: images anno_path: annotations/train.json dataset_dir: dataset/spine_coco data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd', 'gt_poly'] EvalDataset: !COCODataSet image_dir: images anno_path: annotations/valid.json dataset_dir: dataset/spine_coco data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd', 'gt_poly'] TestDataset: !ImageFolder anno_path: annotations/valid.json dataset_dir: dataset/spine_coco
PaddleDetection/configs/datasets/spine_coco.yml/0
{ "file_path": "PaddleDetection/configs/datasets/spine_coco.yml", "repo_id": "PaddleDetection", "token_count": 226 }
16
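`COCODataSet` consumes standard COCO-style JSON. As a rough, hand-written illustration of what `annotations/train.json` for this rotated-box (RBOX) dataset is assumed to contain, the structure below uses invented file names, category names, and coordinates; the `segmentation` polygon is what supplies `gt_poly` for the rotated boxes.

```python
example_train_json = {
    "images": [{"id": 1, "file_name": "spine_0001.png", "width": 2500, "height": 1200}],
    "categories": [{"id": 1, "name": "vertebra_L1"}],  # 9 categories in total per num_classes
    "annotations": [
        {
            "id": 1,
            "image_id": 1,
            "category_id": 1,
            "bbox": [640.0, 380.0, 210.0, 95.0],  # axis-aligned x, y, w, h
            "segmentation": [[640.0, 380.0, 850.0, 392.0, 842.0, 475.0, 632.0, 463.0]],  # 4-point polygon
            "area": 19950.0,
            "iscrowd": 0,
        }
    ],
}
```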
# Deformable DETR ## Introduction Deformable DETR is an object detection model based on DETR. We reproduced the model of the paper. ## Model Zoo | Backbone | Model | Images/GPU | Epochs | Box AP | Config | Log | Download | |:--------:|:---------------:|:----------:|:------:|:------:|:------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------:| | R-50 | Deformable DETR | 2 | 50 | 44.5 | [config](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/deformable_detr/deformable_detr_r50_1x_coco.yml) | [log](https://bj.bcebos.com/v1/paddledet/logs/deformable_detr_r50_1x_coco_44.5.log) | [model](https://paddledet.bj.bcebos.com/models/deformable_detr_r50_1x_coco.pdparams) | **Notes:** - Deformable DETR is trained on COCO train2017 dataset and evaluated on val2017 results of `mAP(IoU=0.5:0.95)`. - Deformable DETR uses 8GPU to train 50 epochs. GPU multi-card training ```bash export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/deformable_detr/deformable_detr_r50_1x_coco.yml --fleet ``` ## Citations ``` @inproceedings{ zhu2021deformable, title={Deformable DETR: Deformable Transformers for End-to-End Object Detection}, author={Xizhou Zhu and Weijie Su and Lewei Lu and Bin Li and Xiaogang Wang and Jifeng Dai}, booktitle={International Conference on Learning Representations}, year={2021}, url={https://openreview.net/forum?id=gZ9hCDWe6ke} } ```
PaddleDetection/configs/deformable_detr/README.md/0
{ "file_path": "PaddleDetection/configs/deformable_detr/README.md", "repo_id": "PaddleDetection", "token_count": 860 }
17
_BASE_: [ 'faster_rcnn_r50_1x_coco.yml', ] pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_pretrained.pdparams weights: output/faster_rcnn_r50_vd_1x_coco/model_final ResNet: # index 0 stands for res2 depth: 50 variant: d norm_type: bn freeze_at: 0 return_idx: [2] num_stages: 3
PaddleDetection/configs/faster_rcnn/faster_rcnn_r50_vd_1x_coco.yml/0
{ "file_path": "PaddleDetection/configs/faster_rcnn/faster_rcnn_r50_vd_1x_coco.yml", "repo_id": "PaddleDetection", "token_count": 149 }
18
_BASE_: [ '../datasets/coco_detection.yml', '../runtime.yml', '_base_/fcos_r50_fpn.yml', '_base_/optimizer_1x.yml', '_base_/fcos_reader.yml', ] weights: output/fcos_r50_fpn_iou_multiscale_2x_coco_010/model_final TrainReader: sample_transforms: - Decode: {} - RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], keep_ratio: True, interp: 1} - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True} - RandomFlip: {} batch_transforms: - Permute: {} - PadBatch: {pad_to_stride: 32} - Gt2FCOSTarget: object_sizes_boundary: [64, 128, 256, 512] center_sampling_radius: 1.5 downsample_ratios: [8, 16, 32, 64, 128] norm_reg_targets: True batch_size: 2 shuffle: True drop_last: True use_shared_memory: True EvalReader: sample_transforms: - Decode: {} - Resize: {target_size: [800, 1333], keep_ratio: True, interp: 1} - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True} - Permute: {} batch_transforms: - PadBatch: {pad_to_stride: 32} batch_size: 1 TestReader: sample_transforms: - Decode: {} - Resize: {target_size: [800, 1333], keep_ratio: True, interp: 1} - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True} - Permute: {} batch_transforms: - PadBatch: {pad_to_stride: 32} batch_size: 1 fuse_normalize: True epoch: 24 LearningRate: base_lr: 0.01 schedulers: - !PiecewiseDecay gamma: 0.1 milestones: [16, 22] - !LinearWarmup start_factor: 0.001 steps: 1000 FCOSHead: fcos_feat: name: FCOSFeat feat_in: 256 feat_out: 256 num_convs: 4 norm_type: "gn" use_dcn: False fpn_stride: [8, 16, 32, 64, 128] prior_prob: 0.01 norm_reg_targets: True centerness_on_reg: True fcos_loss: name: FCOSLoss loss_alpha: 0.25 loss_gamma: 2.0 iou_loss_type: "giou" reg_weights: 1.0 quality: "iou" # default 'centerness' nms: name: MultiClassNMS nms_top_k: 1000 keep_top_k: 100 score_threshold: 0.025 nms_threshold: 0.6
PaddleDetection/configs/fcos/fcos_r50_fpn_iou_multiscale_2x_coco.yml/0
{ "file_path": "PaddleDetection/configs/fcos/fcos_r50_fpn_iou_multiscale_2x_coco.yml", "repo_id": "PaddleDetection", "token_count": 1059 }
19
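In the `Gt2FCOSTarget` transform above, `object_sizes_boundary: [64, 128, 256, 512]` splits targets across the five FPN strides (8–128) by the maximum regression distance of each location. The helper below is a hypothetical illustration of that bucketing, not the actual PaddleDetection code.

```python
def fpn_level_for(max_reg_distance: float, boundaries=(64, 128, 256, 512)) -> int:
    """Return the FPN level index (0 = stride 8, ..., 4 = stride 128) for a location."""
    limits = [0, *boundaries, float("inf")]
    for level in range(len(limits) - 1):
        if limits[level] <= max_reg_distance < limits[level + 1]:
            return level
    return len(boundaries)  # fallback for degenerate inputs


# e.g. a location whose max(l, t, r, b) is 200 px is assigned to level 2 (stride 32)
```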
architecture: GFL pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams GFL: backbone: ResNet neck: FPN head: GFLHead ResNet: depth: 50 variant: b norm_type: bn freeze_at: 0 return_idx: [1,2,3] num_stages: 4 FPN: out_channel: 256 spatial_scales: [0.125, 0.0625, 0.03125] extra_stage: 2 has_extra_convs: true use_c5: false GFLHead: conv_feat: name: FCOSFeat feat_in: 256 feat_out: 256 num_convs: 4 norm_type: "gn" use_dcn: false fpn_stride: [8, 16, 32, 64, 128] prior_prob: 0.01 reg_max: 16 loss_class: name: QualityFocalLoss use_sigmoid: True beta: 2.0 loss_weight: 1.0 loss_dfl: name: DistributionFocalLoss loss_weight: 0.25 loss_bbox: name: GIoULoss loss_weight: 2.0 nms: name: MultiClassNMS nms_top_k: 1000 keep_top_k: 100 score_threshold: 0.025 nms_threshold: 0.6
PaddleDetection/configs/gfl/_base_/gfl_r50_fpn.yml/0
{ "file_path": "PaddleDetection/configs/gfl/_base_/gfl_r50_fpn.yml", "repo_id": "PaddleDetection", "token_count": 454 }
20
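`reg_max: 16` in the head above means each box side is predicted as a discrete distribution over 17 bins and decoded as its expectation, which is also what `DistributionFocalLoss` supervises. The NumPy sketch below shows that standard GFL decode in isolation; it is illustrative and not PaddleDetection's exact implementation.

```python
import numpy as np


def dfl_expected_offset(logits: np.ndarray, reg_max: int = 16) -> np.ndarray:
    """Decode one regression side from logits of shape (..., reg_max + 1).

    The returned offset is the expectation sum_i i * softmax(logits)_i,
    expressed in units of the FPN stride.
    """
    probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    bins = np.arange(reg_max + 1, dtype=np.float32)
    return (probs * bins).sum(axis=-1)
```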
_BASE_: [ 'mask_rcnn_r50_fpn_1x_coco.yml', ] pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_pretrained.pdparams weights: output/mask_rcnn_r50_vd_fpn_1x_coco/model_final ResNet: # index 0 stands for res2 depth: 50 variant: d norm_type: bn freeze_at: 0 return_idx: [0,1,2,3] num_stages: 4
PaddleDetection/configs/mask_rcnn/mask_rcnn_r50_vd_fpn_1x_coco.yml/0
{ "file_path": "PaddleDetection/configs/mask_rcnn/mask_rcnn_r50_vd_fpn_1x_coco.yml", "repo_id": "PaddleDetection", "token_count": 160 }
21
简体中文 | [English](README.md) # FairMOT (FairMOT: On the Fairness of Detection and Re-Identification in Multiple Object Tracking) ## 内容 - [简介](#简介) - [模型库](#模型库) - [快速开始](#快速开始) - [引用](#引用) ## 内容 [FairMOT](https://arxiv.org/abs/2004.01888)以Anchor Free的CenterNet检测器为基础,克服了Anchor-Based的检测框架中anchor和特征不对齐问题,深浅层特征融合使得检测和ReID任务各自获得所需要的特征,并且使用低维度ReID特征,提出了一种由两个同质分支组成的简单baseline来预测像素级目标得分和ReID特征,实现了两个任务之间的公平性,并获得了更高水平的实时多目标跟踪精度。 ### PP-Tracking 实时多目标跟踪系统 此外,PaddleDetection还提供了[PP-Tracking](../../../deploy/pptracking/README.md)实时多目标跟踪系统。PP-Tracking是基于PaddlePaddle深度学习框架的业界首个开源的实时多目标跟踪系统,具有模型丰富、应用广泛和部署高效三大优势。 PP-Tracking支持单镜头跟踪(MOT)和跨镜头跟踪(MTMCT)两种模式,针对实际业务的难点和痛点,提供了行人跟踪、车辆跟踪、多类别跟踪、小目标跟踪、流量统计以及跨镜头跟踪等各种多目标跟踪功能和应用,部署方式支持API调用和GUI可视化界面,部署语言支持Python和C++,部署平台环境支持Linux、NVIDIA Jetson等。 ### AI Studio公开项目案例 PP-Tracking 提供了AI Studio公开项目案例,教程请参考[PP-Tracking之手把手玩转多目标跟踪](https://aistudio.baidu.com/aistudio/projectdetail/3022582)。 ## 模型库 ### FairMOT在MOT-16 Training Set上结果 | 骨干网络 | 输入尺寸 | MOTA | IDF1 | IDS | FP | FN | FPS | 下载链接 | 配置文件 | | :--------------| :------- | :----: | :----: | :---: | :----: | :---: | :------: | :----: |:----: | | DLA-34(paper) | 1088x608 | 83.3 | 81.9 | 544 | 3822 | 14095 | - | - | - | | DLA-34 | 1088x608 | 83.2 | 83.1 | 499 | 3861 | 14223 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608.yml) | | DLA-34 | 864x480 | 80.8 | 81.1 | 561 | 3643 | 16967 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_864x480.pdparams) | [配置文件](./fairmot_dla34_30e_864x480.yml) | | DLA-34 | 576x320 | 74.0 | 76.1 | 640 | 4989 | 23034 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_576x320.pdparams) | [配置文件](./fairmot_dla34_30e_576x320.yml) | ### FairMOT在MOT-16 Test Set上结果 | 骨干网络 | 输入尺寸 | MOTA | IDF1 | IDS | FP | FN | FPS | 下载链接 | 配置文件 | | :--------------| :------- | :----: | :----: | :----: | :----: | :----: |:-------: | :----: | :----: | | DLA-34(paper) | 1088x608 | 74.9 | 72.8 | 1074 | - | - | 25.9 | - | - | | DLA-34 | 1088x608 | 75.0 | 74.7 | 919 | 7934 | 36747 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608.yml) | | DLA-34 | 864x480 | 73.0 | 72.6 | 977 | 7578 | 40601 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_864x480.pdparams) | [配置文件](./fairmot_dla34_30e_864x480.yml) | | DLA-34 | 576x320 | 69.9 | 70.2 | 1044 | 8869 | 44898 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_576x320.pdparams) | [配置文件](./fairmot_dla34_30e_576x320.yml) | **注意:** - FairMOT DLA-34均使用2个GPU进行训练,每个GPU上batch size为6,训练30个epoch。 ### FairMOT enhance模型 ### 在MOT-16 Test Set上结果 | 骨干网络 | 输入尺寸 | MOTA | IDF1 | IDS | FP | FN | FPS | 下载链接 | 配置文件 | | :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: | | DLA-34 | 1088x608 | 75.9 | 74.7 | 1021 | 11425 | 31475 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_enhance_dla34_60e_1088x608.pdparams) | [配置文件](./fairmot_enhance_dla34_60e_1088x608.yml) | | HarDNet-85 | 1088x608 | 75.0 | 70.0 | 1050 | 11837 | 32774 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_enhance_hardnet85_30e_1088x608.pdparams) | [配置文件](./fairmot_enhance_hardnet85_30e_1088x608.yml) | ### 在MOT-17 Test Set上结果 | 骨干网络 | 输入尺寸 | MOTA | IDF1 | IDS | FP | FN | FPS | 下载链接 | 配置文件 | | :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: | | DLA-34 | 1088x608 | 75.3 | 
74.2 | 3270 | 29112 | 106749 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_enhance_dla34_60e_1088x608.pdparams) | [配置文件](./fairmot_enhance_dla34_60e_1088x608.yml) | | HarDNet-85 | 1088x608 | 74.7 | 70.7 | 3210 | 29790 | 109914 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_enhance_hardnet85_30e_1088x608.pdparams) | [配置文件](./fairmot_enhance_hardnet85_30e_1088x608.yml) | **注意:** - FairMOT enhance模型均使用8个GPU进行训练,训练集中加入了crowdhuman数据集一起参与训练。 - FairMOT enhance DLA-34 每个GPU上batch size为16,训练60个epoch。 - FairMOT enhance HarDNet-85 每个GPU上batch size为10,训练30个epoch。 ### FairMOT轻量级模型 ### 在MOT-16 Test Set上结果 | 骨干网络 | 输入尺寸 | MOTA | IDF1 | IDS | FP | FN | FPS | 下载链接 | 配置文件 | | :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: | | HRNetV2-W18 | 1088x608 | 71.7 | 66.6 | 1340 | 8642 | 41592 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_1088x608.pdparams) | [配置文件](./fairmot_hrnetv2_w18_dlafpn_30e_1088x608.yml) | ### 在MOT-17 Test Set上结果 | 骨干网络 | 输入尺寸 | MOTA | IDF1 | IDS | FP | FN | FPS | 下载链接 | 配置文件 | | :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: | | HRNetV2-W18 | 1088x608 | 70.7 | 65.7 | 4281 | 22485 | 138468 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_1088x608.pdparams) | [配置文件](./fairmot_hrnetv2_w18_dlafpn_30e_1088x608.yml) | | HRNetV2-W18 | 864x480 | 70.3 | 65.8 | 4056 | 18927 | 144486 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_864x480.pdparams) | [配置文件](./fairmot_hrnetv2_w18_dlafpn_30e_864x480.yml) | | HRNetV2-W18 | 576x320 | 65.3 | 64.8 | 4137 | 28860 | 163017 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.pdparams) | [配置文件](./fairmot_hrnetv2_w18_dlafpn_30e_576x320.yml) | **注意:** - FairMOT HRNetV2-W18均使用8个GPU进行训练,每个GPU上batch size为4,训练30个epoch,使用的ImageNet预训练,优化器策略采用的是Momentum,并且训练集中加入了crowdhuman数据集一起参与训练。 ### FairMOT + BYTETracker ### 在MOT-17 Half上结果 | 骨干网络 | 输入尺寸 | MOTA | IDF1 | IDS | FP | FN | FPS | 下载链接 | 配置文件 | | :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: | | DLA-34 | 1088x608 | 69.1 | 72.8 | 299 | 1957 | 14412 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608.yml) | | DLA-34 + BYTETracker| 1088x608 | 70.3 | 73.2 | 234 | 2176 | 13598 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_bytetracker.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608_bytetracker.yml) | **注意:** - FairMOT模型此处是ablation study的配置,使用的训练集是原先MIX的5个数据集(Caltech,CUHKSYSU,PRW,Cityscapes,ETHZ)加上MOT17 Train的前一半,且使用是预训练权重是CenterNet的COCO预训练权重,验证是在MOT17 Train的后一半上测的。 - BYTETracker应用到PaddleDetection的其他FairMOT模型,只需要更改对应的config文件里的tracker部分为如下所示: ``` JDETracker: use_byte: True match_thres: 0.8 conf_thres: 0.4 low_conf_thres: 0.2 ``` ### FairMOT迁移学习模型 ### 在GMOT-40的airplane子集上的结果 | 骨干网络 | 输入尺寸 | MOTA | IDF1 | IDS | FP | FN | FPS | 下载链接 | 配置文件 | | :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: | | DLA-34 | 1088x608 | 96.6 | 94.7 | 19 | 300 | 466 | - |[下载链接](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_airplane.pdparams) | [配置文件](./fairmot_dla34_30e_1088x608_airplane.yml) | **注意:** - 此模型数据集是GMOT-40的airplane类别抽离出来的子集,PaddleDetection团队整理后的下载链接为: ```wget 
https://bj.bcebos.com/v1/paddledet/data/mot/airplane.zip```,下载解压存放于 ```dataset/mot```目录下,并将其中的```airplane.train```复制存放于```dataset/mot/image_lists```。 - FairMOT模型此处训练是采用行人FairMOT训好的模型作为预训练权重,使用的训练集是airplane全集共4个视频序列,验证也是在全集上测的。 - 应用到其他物体的跟踪,需要更改对应的config文件里的tracker部分的```min_box_area```和```vertical_ratio```,如下所示: ``` JDETracker: conf_thres: 0.4 tracked_thresh: 0.4 metric_type: cosine min_box_area: 0 # 200 for pedestrian vertical_ratio: 0 # 1.6 for pedestrian ``` ## 快速开始 ### 1. 训练 使用2个GPU通过如下命令一键式启动训练 ```bash python -m paddle.distributed.launch --log_dir=./fairmot_dla34_30e_1088x608/ --gpus 0,1 tools/train.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml ``` ### 2. 评估 使用单张GPU通过如下命令一键式启动评估 ```bash # 使用PaddleDetection发布的权重 CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams # 使用训练保存的checkpoint CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml -o weights=output/fairmot_dla34_30e_1088x608/model_final.pdparams ``` **注意:** - 默认评估的是MOT-16 Train Set数据集, 如需换评估数据集可参照以下代码修改`configs/datasets/mot.yml`: ``` EvalMOTDataset: !MOTImageFolder dataset_dir: dataset/mot data_root: MOT17/images/train keep_ori_im: False # set True if save visualization images or video ``` - 跟踪结果会存于`{output_dir}/mot_results/`中,里面每个视频序列对应一个txt,每个txt文件每行信息是`frame,id,x1,y1,w,h,score,-1,-1,-1`, 此外`{output_dir}`可通过`--output_dir`设置。 ### 3. 预测 使用单个GPU通过如下命令预测一个视频,并保存为视频 ```bash # 预测一个视频 CUDA_VISIBLE_DEVICES=0 python tools/infer_mot.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams --video_file={your video name}.mp4 --save_videos ``` **注意:** - 请先确保已经安装了[ffmpeg](https://ffmpeg.org/ffmpeg.html), Linux(Ubuntu)平台可以直接用以下命令安装:`apt-get update && apt-get install -y ffmpeg`。 ### 4. 导出预测模型 ```bash CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams ``` ### 5. 用导出的模型基于Python去预测 ```bash python deploy/pptracking/python/mot_jde_infer.py --model_dir=output_inference/fairmot_dla34_30e_1088x608 --video_file={your video name}.mp4 --device=GPU --save_mot_txts ``` **注意:** - 跟踪模型是对视频进行预测,不支持单张图的预测,默认保存跟踪结果可视化后的视频,可添加`--save_mot_txts`表示保存跟踪结果的txt文件,或`--save_images`表示保存跟踪结果可视化图片。 - 跟踪结果txt文件每行信息是`frame,id,x1,y1,w,h,score,-1,-1,-1`。 ### 6. 用导出的跟踪和关键点模型Python联合预测 ```bash python deploy/python/mot_keypoint_unite_infer.py --mot_model_dir=output_inference/fairmot_dla34_30e_1088x608/ --keypoint_model_dir=output_inference/higherhrnet_hrnet_w32_512/ --video_file={your video name}.mp4 --device=GPU ``` **注意:** - 关键点模型导出教程请参考`configs/keypoint/README.md`。 ## 引用 ``` @article{zhang2020fair, title={FairMOT: On the Fairness of Detection and Re-Identification in Multiple Object Tracking}, author={Zhang, Yifu and Wang, Chunyu and Wang, Xinggang and Zeng, Wenjun and Liu, Wenyu}, journal={arXiv preprint arXiv:2004.01888}, year={2020} } @article{shao2018crowdhuman, title={CrowdHuman: A Benchmark for Detecting Human in a Crowd}, author={Shao, Shuai and Zhao, Zijian and Li, Boxun and Xiao, Tete and Yu, Gang and Zhang, Xiangyu and Sun, Jian}, journal={arXiv preprint arXiv:1805.00123}, year={2018} } ```
PaddleDetection/configs/mot/fairmot/README_cn.md/0
{ "file_path": "PaddleDetection/configs/mot/fairmot/README_cn.md", "repo_id": "PaddleDetection", "token_count": 7948 }
22
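The FairMOT README above notes that tracking results are written to `{output_dir}/mot_results/` as one txt file per video sequence, one row per box in the form `frame,id,x1,y1,w,h,score,-1,-1,-1`. A minimal sketch of reading such a file back into per-frame boxes might look like the following (the helper name and example path are illustrative assumptions, not part of PaddleDetection):

```python
from collections import defaultdict

def load_mot_results(txt_path):
    """Group MOT result rows `frame,id,x1,y1,w,h,score,-1,-1,-1` by frame."""
    per_frame = defaultdict(list)
    with open(txt_path) as f:
        for line in f:
            fields = line.strip().split(',')
            if len(fields) < 7:
                continue  # skip blank or malformed rows
            frame = int(float(fields[0]))
            track_id = int(float(fields[1]))
            x1, y1, w, h, score = map(float, fields[2:7])
            per_frame[frame].append(
                {'id': track_id, 'bbox': [x1, y1, w, h], 'score': score})
    return per_frame

# Hypothetical usage:
# results = load_mot_results('output/mot_results/MOT16-02.txt')
# print(len(results[1]), 'boxes tracked in frame 1')
```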
# This config is an assembled config for ByteTrack MOT, used as eval/infer mode for MOT. _BASE_: [ '../bytetrack/detector/yolox_x_24e_800x1440_mix_det.yml', '../bytetrack/_base_/mix_det.yml', '../bytetrack/_base_/yolox_mot_reader_800x1440.yml' ] weights: output/ocsort_yolox/model_final log_iter: 20 snapshot_epoch: 2 metric: MOT # eval/infer mode num_classes: 1 architecture: ByteTrack pretrain_weights: https://bj.bcebos.com/v1/paddledet/models/yolox_x_300e_coco.pdparams ByteTrack: detector: YOLOX reid: None tracker: OCSORTTracker det_weights: https://bj.bcebos.com/v1/paddledet/models/mot/yolox_x_24e_800x1440_mix_mot_ch.pdparams reid_weights: None depth_mult: 1.33 width_mult: 1.25 YOLOX: backbone: CSPDarkNet neck: YOLOCSPPAN head: YOLOXHead input_size: [800, 1440] size_stride: 32 size_range: [18, 22] # multi-scale range [576*1024 ~ 800*1440], w/h ratio=1.8 CSPDarkNet: arch: "X" return_idx: [2, 3, 4] depthwise: False YOLOCSPPAN: depthwise: False # Tracking requires higher quality boxes, so NMS score_threshold will be higher YOLOXHead: l1_epoch: 20 depthwise: False loss_weight: {cls: 1.0, obj: 1.0, iou: 5.0, l1: 1.0} assigner: name: SimOTAAssigner candidate_topk: 10 use_vfl: False nms: name: MultiClassNMS nms_top_k: 1000 keep_top_k: 100 score_threshold: 0.1 nms_threshold: 0.7 # For speed while keep high mAP, you can modify 'nms_top_k' to 1000 and 'keep_top_k' to 100, the mAP will drop about 0.1%. # For high speed demo, you can modify 'score_threshold' to 0.25 and 'nms_threshold' to 0.45, but the mAP will drop a lot. OCSORTTracker: det_thresh: 0.6 max_age: 30 min_hits: 3 iou_threshold: 0.3 delta_t: 3 inertia: 0.2 vertical_ratio: 1.6 min_box_area: 100 use_byte: False # MOTDataset for MOT evaluation and inference EvalMOTDataset: !MOTImageFolder dataset_dir: dataset/mot data_root: MOT17/images/half keep_ori_im: True # set as True in DeepSORT and ByteTrack TestMOTDataset: !MOTImageFolder dataset_dir: dataset/mot keep_ori_im: True # set True if save visualization images or video
PaddleDetection/configs/mot/ocsort/ocsort_yolox.yml/0
{ "file_path": "PaddleDetection/configs/mot/ocsort/ocsort_yolox.yml", "repo_id": "PaddleDetection", "token_count": 910 }
23
epoch: 300 LearningRate: base_lr: 0.15 schedulers: - !CosineDecay max_epochs: 300 - !LinearWarmup start_factor: 1.0 steps: 34350 OptimizerBuilder: optimizer: momentum: 0.9 type: Momentum regularizer: factor: 0.00004 type: L2
PaddleDetection/configs/picodet/legacy_model/pruner/optimizer_300e_pruner.yml/0
{ "file_path": "PaddleDetection/configs/picodet/legacy_model/pruner/optimizer_300e_pruner.yml", "repo_id": "PaddleDetection", "token_count": 124 }
24
_BASE_: [ '../datasets/coco_detection.yml', '../runtime.yml', '../ppyoloe/_base_/optimizer_300e.yml', '../ppyoloe/_base_/ppyoloe_plus_crn_tiny_auxhead.yml', '../ppyoloe/_base_/ppyoloe_plus_reader_320.yml', ] log_iter: 100 snapshot_epoch: 4 weights: output/ppyoloe_plus_crn_t_auxhead_320_60e_pphuman/model_final pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_t_auxhead_300e_coco.pdparams # 640*640 COCO mAP 39.7 depth_mult: 0.33 width_mult: 0.375 num_classes: 1 TrainDataset: !COCODataSet image_dir: "" anno_path: annotations/train.json dataset_dir: dataset/pphuman data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd'] EvalDataset: !COCODataSet image_dir: "" anno_path: annotations/val.json dataset_dir: dataset/pphuman TestDataset: !ImageFolder anno_path: annotations/val.json dataset_dir: dataset/pphuman TrainReader: batch_size: 8 epoch: 60 LearningRate: base_lr: 0.001 schedulers: - !CosineDecay max_epochs: 72 - !LinearWarmup start_factor: 0. epochs: 1 PPYOLOEHead: static_assigner_epoch: -1 nms: name: MultiClassNMS nms_top_k: 1000 keep_top_k: 300 score_threshold: 0.01 nms_threshold: 0.7
PaddleDetection/configs/pphuman/ppyoloe_plus_crn_t_auxhead_320_60e_pphuman.yml/0
{ "file_path": "PaddleDetection/configs/pphuman/ppyoloe_plus_crn_t_auxhead_320_60e_pphuman.yml", "repo_id": "PaddleDetection", "token_count": 576 }
25
{ "images": [], "annotations": [], "categories": [ { "supercategory": "component", "id": 1, "name": "car" }, { "supercategory": "component", "id": 2, "name": "truck" }, { "supercategory": "component", "id": 3, "name": "bus" }, { "supercategory": "component", "id": 4, "name": "motorbike" }, { "supercategory": "component", "id": 5, "name": "tricycle" }, { "supercategory": "component", "id": 6, "name": "carplate" } ] }
PaddleDetection/configs/ppvehicle/vehicle_yolov3/vehicle.json/0
{ "file_path": "PaddleDetection/configs/ppvehicle/vehicle_yolov3/vehicle.json", "repo_id": "PaddleDetection", "token_count": 475 }
26
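The `vehicle.json` above is an empty COCO-style annotation template: `images` and `annotations` are blank lists and only the six vehicle `categories` are filled in. A hedged sketch of how such a template might be populated — the file paths, image name, and box values below are invented purely for illustration:

```python
import json

# Hypothetical path to the category template shown above.
with open('dataset/vehicle.json') as f:
    coco = json.load(f)

# Map category id -> name: {1: 'car', 2: 'truck', ..., 6: 'carplate'}
id_to_name = {cat['id']: cat['name'] for cat in coco['categories']}

# Append one image record and one box annotation (all values invented).
coco['images'].append({'id': 1, 'file_name': 'street_0001.jpg',
                       'width': 1920, 'height': 1080})
coco['annotations'].append({'id': 1, 'image_id': 1, 'category_id': 1,
                            'bbox': [100.0, 200.0, 320.0, 180.0],  # x, y, w, h
                            'area': 320.0 * 180.0, 'iscrowd': 0})

with open('dataset/vehicle_filled.json', 'w') as f:
    json.dump(coco, f, indent=2)
```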
epoch: 300 LearningRate: base_lr: 0.01 schedulers: - name: CosineDecay max_epochs: 360 - name: LinearWarmup start_factor: 0. epochs: 5 OptimizerBuilder: optimizer: momentum: 0.9 type: Momentum regularizer: factor: 0.0005 type: L2
PaddleDetection/configs/ppyoloe/_base_/optimizer_300e.yml/0
{ "file_path": "PaddleDetection/configs/ppyoloe/_base_/optimizer_300e.yml", "repo_id": "PaddleDetection", "token_count": 134 }
27
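The `optimizer_300e.yml` above pairs a 5-epoch `LinearWarmup` starting from `start_factor: 0.` with a `CosineDecay` whose `max_epochs: 360` deliberately exceeds the 300 training epochs, so the learning rate never fully reaches zero. The following is only a rough sketch of that schedule's shape, assuming the usual warmup-then-half-cosine formula rather than reproducing PaddleDetection's scheduler code:

```python
import math

def lr_at(epoch, base_lr=0.01, warmup_epochs=5, start_factor=0.0,
          cosine_max_epochs=360):
    """Approximate LinearWarmup + CosineDecay shape from the config above."""
    if epoch < warmup_epochs:
        # Linear ramp from start_factor * base_lr up to base_lr.
        alpha = epoch / warmup_epochs
        return base_lr * (start_factor + (1.0 - start_factor) * alpha)
    # Half-cosine decay toward zero over cosine_max_epochs.
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * epoch / cosine_max_epochs))

for e in [0, 2.5, 5, 150, 300]:
    print(e, round(lr_at(e), 5))
```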
_BASE_: [ './_base_/exdark_detection.yml', '../../runtime.yml', '../_base_/optimizer_80e.yml', '../_base_/ppyoloe_crn.yml', '../_base_/ppyoloe_reader.yml', ] log_iter: 100 snapshot_epoch: 5 weights: output/ppyoloe_crn_m_80e_exdark/model_final pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyoloe_crn_m_300e_coco.pdparams depth_mult: 0.67 width_mult: 0.75
PaddleDetection/configs/ppyoloe/application/ppyoloe_crn_m_80e_exdark.yml/0
{ "file_path": "PaddleDetection/configs/ppyoloe/application/ppyoloe_crn_m_80e_exdark.yml", "repo_id": "PaddleDetection", "token_count": 183 }
28
_BASE_: [ '../ppyoloe_plus_crn_s_80e_coco.yml', ] for_distill: True architecture: PPYOLOE PPYOLOE: backbone: CSPResNet neck: CustomCSPPAN yolo_head: PPYOLOEHead post_process: ~ worker_num: 4 TrainReader: sample_transforms: - Decode: {} - RandomDistort: {} - RandomExpand: {fill_value: [123.675, 116.28, 103.53]} - RandomCrop: {} - RandomFlip: {} batch_transforms: - BatchRandomResize: {target_size: [640], random_size: True, random_interp: True, keep_ratio: False} - NormalizeImage: {mean: [0., 0., 0.], std: [1., 1., 1.], norm_type: none} - Permute: {} - PadGT: {} batch_size: 8 shuffle: True drop_last: True use_shared_memory: True collate_batch: True log_iter: 100 snapshot_epoch: 5 weights: output/ppyoloe_plus_crn_s_80e_coco_distill/model_final pretrain_weights: https://bj.bcebos.com/v1/paddledet/models/pretrained/ppyoloe_crn_s_obj365_pretrained.pdparams depth_mult: 0.33 width_mult: 0.50
PaddleDetection/configs/ppyoloe/distill/ppyoloe_plus_crn_s_80e_coco_distill.yml/0
{ "file_path": "PaddleDetection/configs/ppyoloe/distill/ppyoloe_plus_crn_s_80e_coco_distill.yml", "repo_id": "PaddleDetection", "token_count": 418 }
29
_BASE_: [ '../datasets/coco_detection.yml', '../runtime.yml', './_base_/optimizer_300e.yml', './_base_/ppyoloe_plus_crn_tiny_auxhead.yml', './_base_/ppyoloe_plus_reader_320.yml', ] log_iter: 100 snapshot_epoch: 10 weights: output/ppyoloe_plus_crn_t_auxhead_320_300e_coco/model_final pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/CSPResNetb_t_pretrained.pdparams depth_mult: 0.33 width_mult: 0.375
PaddleDetection/configs/ppyoloe/ppyoloe_plus_crn_t_auxhead_320_300e_coco.yml/0
{ "file_path": "PaddleDetection/configs/ppyoloe/ppyoloe_plus_crn_t_auxhead_320_300e_coco.yml", "repo_id": "PaddleDetection", "token_count": 199 }
30
worker_num: 2 TrainReader: sample_transforms: - Decode: {} - AutoAugment: {autoaug_type: v1} - RandomResize: {target_size: [[384,1000], [416,1000], [448,1000], [480,1000], [512,1000], [544,1000], [576,1000], [608,1000], [640,1000], [672,1000]], interp: 2, keep_ratio: True} - RandomFlip: {prob: 0.5} - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]} - Permute: {} batch_transforms: - PadBatch: {pad_to_stride: 32} batch_size: 2 shuffle: true drop_last: true collate_batch: false use_shared_memory: true EvalReader: sample_transforms: - Decode: {} - Resize: {interp: 2, target_size: [640, 640], keep_ratio: True} - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]} - Permute: {} batch_transforms: - PadBatch: {pad_to_stride: 32} batch_size: 1 shuffle: false drop_last: false TestReader: sample_transforms: - Decode: {} - Resize: {interp: 2, target_size: [640, 640], keep_ratio: True} - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]} - Permute: {} batch_transforms: - PadBatch: {pad_to_stride: 32} batch_size: 1 shuffle: false drop_last: false
PaddleDetection/configs/rcnn_enhance/_base_/faster_rcnn_enhance_reader.yml/0
{ "file_path": "PaddleDetection/configs/rcnn_enhance/_base_/faster_rcnn_enhance_reader.yml", "repo_id": "PaddleDetection", "token_count": 527 }
31
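The reader config above pads each batch with `PadBatch: {pad_to_stride: 32}` so that image height and width divide evenly by the network's downsampling stride. A minimal sketch of that padding step for a single CHW image (the helper name is an illustrative assumption; the same idea appears later in this dump as the `PadStride` preprocess op):

```python
import math
import numpy as np

def pad_to_stride(im_chw, stride=32):
    """Zero-pad a CHW image so its height and width are multiples of `stride`."""
    c, h, w = im_chw.shape
    pad_h = int(math.ceil(h / stride) * stride)
    pad_w = int(math.ceil(w / stride) * stride)
    padded = np.zeros((c, pad_h, pad_w), dtype=im_chw.dtype)
    padded[:, :h, :w] = im_chw
    return padded

print(pad_to_stride(np.zeros((3, 610, 987), dtype=np.float32)).shape)  # (3, 640, 992)
```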
简体中文 | [English](README_en.md) # 旋转框检测 ## 内容 - [简介](#简介) - [模型库](#模型库) - [数据准备](#数据准备) - [安装依赖](#安装依赖) ## 简介 旋转框常用于检测带有角度信息的矩形框,即矩形框的宽和高不再与图像坐标轴平行。相较于水平矩形框,旋转矩形框一般包括更少的背景信息。旋转框检测常用于遥感等场景中。 ## 模型库 | 模型 | mAP | 学习率策略 | 角度表示 | 数据增广 | GPU数目 | 每GPU图片数目 | 模型下载 | 配置文件 | |:---:|:----:|:---------:|:-----:|:--------:|:-----:|:------------:|:-------:|:------:| | [S2ANet](./s2anet/README.md) | 73.84 | 2x | le135 | - | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/s2anet_alignconv_2x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/s2anet/s2anet_alignconv_2x_dota.yml) | | [FCOSR](./fcosr/README.md) | 76.62 | 3x | oc | RR | 4 | 4 | [model](https://paddledet.bj.bcebos.com/models/fcosr_x50_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/fcosr/fcosr_x50_3x_dota.yml) | | [PP-YOLOE-R-s](./ppyoloe_r/README.md) | 73.82 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_s_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_dota.yml) | | [PP-YOLOE-R-s](./ppyoloe_r/README.md) | 79.42 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_s_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_dota_ms.yml) | | [PP-YOLOE-R-m](./ppyoloe_r/README.md) | 77.64 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_m_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_m_3x_dota.yml) | | [PP-YOLOE-R-m](./ppyoloe_r/README.md) | 79.71 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_m_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_m_3x_dota_ms.yml) | | [PP-YOLOE-R-l](./ppyoloe_r/README.md) | 78.14 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml) | | [PP-YOLOE-R-l](./ppyoloe_r/README.md) | 80.02 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota_ms.yml) | | [PP-YOLOE-R-x](./ppyoloe_r/README.md) | 78.28 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_x_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_x_3x_dota.yml) | | [PP-YOLOE-R-x](./ppyoloe_r/README.md) | 80.73 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_x_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_x_3x_dota_ms.yml) | **注意:** - 如果**GPU卡数**或者**batch size**发生了改变,你需要按照公式 **lr<sub>new</sub> = lr<sub>default</sub> * (batch_size<sub>new</sub> * GPU_number<sub>new</sub>) / (batch_size<sub>default</sub> * GPU_number<sub>default</sub>)** 调整学习率。 - 模型库中的模型默认使用单尺度训练单尺度测试。如果数据增广一栏标明MS,意味着使用多尺度训练和多尺度测试。如果数据增广一栏标明RR,意味着使用RandomRotate数据增广进行训练。 ## 数据准备 ### DOTA数据准备 
DOTA数据集是一个大规模的遥感图像数据集,包含旋转框和水平框的标注。可以从[DOTA数据集官网](https://captain-whu.github.io/DOTA/)下载数据集并解压,解压后的数据集目录结构如下所示: ``` ${DOTA_ROOT} ├── test │ └── images ├── train │ ├── images │ └── labelTxt └── val ├── images └── labelTxt ``` 对于有标注的数据,每一张图片会对应一个同名的txt文件,文件中每一行为一个旋转框的标注,其格式如下: ``` x1 y1 x2 y2 x3 y3 x4 y4 class_name difficult ``` #### 单尺度切图 DOTA数据集分辨率较高,因此一般在训练和测试之前对图像进行离线切图,使用单尺度进行切图可以使用以下命令: ``` bash # 对于有标注的数据进行切图 python configs/rotate/tools/prepare_data.py \ --input_dirs ${DOTA_ROOT}/train/ ${DOTA_ROOT}/val/ \ --output_dir ${OUTPUT_DIR}/trainval1024/ \ --coco_json_file DOTA_trainval1024.json \ --subsize 1024 \ --gap 200 \ --rates 1.0 # 对于无标注的数据进行切图需要设置--image_only python configs/rotate/tools/prepare_data.py \ --input_dirs ${DOTA_ROOT}/test/ \ --output_dir ${OUTPUT_DIR}/test1024/ \ --coco_json_file DOTA_test1024.json \ --subsize 1024 \ --gap 200 \ --rates 1.0 \ --image_only ``` #### 多尺度切图 使用多尺度进行切图可以使用以下命令: ``` bash # 对于有标注的数据进行切图 python configs/rotate/tools/prepare_data.py \ --input_dirs ${DOTA_ROOT}/train/ ${DOTA_ROOT}/val/ \ --output_dir ${OUTPUT_DIR}/trainval/ \ --coco_json_file DOTA_trainval1024.json \ --subsize 1024 \ --gap 500 \ --rates 0.5 1.0 1.5 # 对于无标注的数据进行切图需要设置--image_only python configs/rotate/tools/prepare_data.py \ --input_dirs ${DOTA_ROOT}/test/ \ --output_dir ${OUTPUT_DIR}/test1024/ \ --coco_json_file DOTA_test1024.json \ --subsize 1024 \ --gap 500 \ --rates 0.5 1.0 1.5 \ --image_only ``` ### 自定义数据集 旋转框使用标准COCO数据格式,你可以将你的数据集转换成COCO格式以训练模型。COCO标准数据格式的标注信息中包含以下信息: ``` python 'annotations': [ { 'id': 2083, 'category_id': 9, 'image_id': 9008, 'bbox': [x, y, w, h], # 水平框标注 'segmentation': [[x1, y1, x2, y2, x3, y3, x4, y4]], # 旋转框标注 ... } ... ] ``` **需要注意的是`bbox`的标注是水平框标注,`segmentation`为旋转框四个点的标注(顺时针或逆时针均可)。在旋转框训练时`bbox`是可以缺省,一般推荐根据旋转框标注`segmentation`生成。** 在PaddleDetection 2.4及之前的版本,`bbox`为旋转框标注[x, y, w, h, angle],`segmentation`缺省,**目前该格式已不再支持,请下载最新数据集或者转换成标准COCO格式**。 ## 安装依赖 旋转框检测模型需要依赖外部算子进行训练,评估等。Linux环境下,你可以执行以下命令进行编译安装 ``` cd ppdet/ext_op python setup.py install ``` Windows环境请按照如下步骤安装: (1)准备Visual Studio (版本需要>=Visual Studio 2015 update3),这里以VS2017为例; (2)点击开始-->Visual Studio 2017-->适用于 VS 2017 的x64本机工具命令提示; (3)设置环境变量:`set DISTUTILS_USE_SDK=1` (4)进入`PaddleDetection/ppdet/ext_op`目录,通过`python setup.py install`命令进行安装。 安装完成后,可以执行`ppdet/ext_op/unittest`下的单测验证外部op是否正确安装
PaddleDetection/configs/rotate/README.md/0
{ "file_path": "PaddleDetection/configs/rotate/README.md", "repo_id": "PaddleDetection", "token_count": 4354 }
32
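The custom-dataset note in the rotate README above says the horizontal `bbox` field may be omitted and is best generated from the rotated-box `segmentation` points. One plausible way to derive it is simply the axis-aligned extent of the four corners — a sketch under that assumption, not PaddleDetection's own conversion code:

```python
def poly_to_hbox(segmentation):
    """Derive the horizontal `bbox` [x, y, w, h] from a rotated-box
    `segmentation` given as [[x1, y1, x2, y2, x3, y3, x4, y4]]."""
    pts = segmentation[0]
    xs, ys = pts[0::2], pts[1::2]
    x_min, y_min = min(xs), min(ys)
    return [x_min, y_min, max(xs) - x_min, max(ys) - y_min]

# A square rotated by 45 degrees around (50, 50):
print(poly_to_hbox([[50, 30, 70, 50, 50, 70, 30, 50]]))  # [30, 30, 40, 40]
```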
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import six import glob import time import yaml import argparse import cv2 import numpy as np import paddle import paddle.version as paddle_version from paddle.inference import Config, create_predictor, PrecisionType, get_trt_runtime_version TUNED_TRT_DYNAMIC_MODELS = {'DETR'} def check_version(version='2.2'): err = "PaddlePaddle version {} or higher is required, " \ "or a suitable develop version is satisfied as well. \n" \ "Please make sure the version is good with your code.".format(version) version_installed = [ paddle_version.major, paddle_version.minor, paddle_version.patch, paddle_version.rc ] if version_installed == ['0', '0', '0', '0']: return if version == 'develop': raise Exception("PaddlePaddle develop version is required!") version_split = version.split('.') length = min(len(version_installed), len(version_split)) for i in six.moves.range(length): if version_installed[i] > version_split[i]: return if version_installed[i] < version_split[i]: raise Exception(err) def check_trt_version(version='8.2'): err = "TensorRT version {} or higher is required," \ "Please make sure the version is good with your code.".format(version) version_split = list(map(int, version.split('.'))) version_installed = get_trt_runtime_version() length = min(len(version_installed), len(version_split)) for i in six.moves.range(length): if version_installed[i] > version_split[i]: return if version_installed[i] < version_split[i]: raise Exception(err) # preprocess ops def decode_image(im_file, im_info): if isinstance(im_file, str): with open(im_file, 'rb') as f: im_read = f.read() data = np.frombuffer(im_read, dtype='uint8') im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) else: im = im_file im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32) im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32) return im, im_info class Resize(object): def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR): if isinstance(target_size, int): target_size = [target_size, target_size] self.target_size = target_size self.keep_ratio = keep_ratio self.interp = interp def __call__(self, im, im_info): assert len(self.target_size) == 2 assert self.target_size[0] > 0 and self.target_size[1] > 0 im_channel = im.shape[2] im_scale_y, im_scale_x = self.generate_scale(im) im = cv2.resize( im, None, None, fx=im_scale_x, fy=im_scale_y, interpolation=self.interp) im_info['im_shape'] = np.array(im.shape[:2]).astype('float32') im_info['scale_factor'] = np.array( [im_scale_y, im_scale_x]).astype('float32') return im, im_info def generate_scale(self, im): origin_shape = im.shape[:2] im_c = im.shape[2] if self.keep_ratio: im_size_min = np.min(origin_shape) im_size_max = np.max(origin_shape) target_size_min = np.min(self.target_size) 
target_size_max = np.max(self.target_size) im_scale = float(target_size_min) / float(im_size_min) if np.round(im_scale * im_size_max) > target_size_max: im_scale = float(target_size_max) / float(im_size_max) im_scale_x = im_scale im_scale_y = im_scale else: resize_h, resize_w = self.target_size im_scale_y = resize_h / float(origin_shape[0]) im_scale_x = resize_w / float(origin_shape[1]) return im_scale_y, im_scale_x class Permute(object): def __init__(self, ): super(Permute, self).__init__() def __call__(self, im, im_info): im = im.transpose((2, 0, 1)) return im, im_info class NormalizeImage(object): def __init__(self, mean, std, is_scale=True, norm_type='mean_std'): self.mean = mean self.std = std self.is_scale = is_scale self.norm_type = norm_type def __call__(self, im, im_info): im = im.astype(np.float32, copy=False) if self.is_scale: scale = 1.0 / 255.0 im *= scale if self.norm_type == 'mean_std': mean = np.array(self.mean)[np.newaxis, np.newaxis, :] std = np.array(self.std)[np.newaxis, np.newaxis, :] im -= mean im /= std return im, im_info class PadStride(object): def __init__(self, stride=0): self.coarsest_stride = stride def __call__(self, im, im_info): coarsest_stride = self.coarsest_stride if coarsest_stride <= 0: return im, im_info im_c, im_h, im_w = im.shape pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride) pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride) padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32) padding_im[:, :im_h, :im_w] = im return padding_im, im_info def preprocess(im, preprocess_ops): # process image by preprocess_ops im_info = { 'scale_factor': np.array( [1., 1.], dtype=np.float32), 'im_shape': None, } im, im_info = decode_image(im, im_info) for operator in preprocess_ops: im, im_info = operator(im, im_info) return im, im_info def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( '--model_dir', type=str, help='directory of inference model') parser.add_argument( '--run_mode', type=str, default='paddle', help='running mode') parser.add_argument('--batch_size', type=int, default=1, help='batch size') parser.add_argument( '--image_dir', type=str, default='/paddle/data/DOTA_1024_ss/test1024/images', help='directory of test images') parser.add_argument( '--warmup_iter', type=int, default=5, help='num of warmup iters') parser.add_argument( '--total_iter', type=int, default=2000, help='num of total iters') parser.add_argument( '--log_iter', type=int, default=50, help='num of log interval') parser.add_argument( '--tuned_trt_shape_file', type=str, default='shape_range_info.pbtxt', help='dynamic shape range info') args = parser.parse_args() return args def init_predictor(FLAGS): model_dir, run_mode, batch_size = FLAGS.model_dir, FLAGS.run_mode, FLAGS.batch_size yaml_file = os.path.join(model_dir, 'infer_cfg.yml') with open(yaml_file) as f: yml_conf = yaml.safe_load(f) config = Config( os.path.join(model_dir, 'model.pdmodel'), os.path.join(model_dir, 'model.pdiparams')) # initial GPU memory(M), device ID config.enable_use_gpu(200, 0) # optimize graph and fuse op config.switch_ir_optim(True) precision_map = { 'trt_int8': Config.Precision.Int8, 'trt_fp32': Config.Precision.Float32, 'trt_fp16': Config.Precision.Half } arch = yml_conf['arch'] tuned_trt_shape_file = os.path.join(model_dir, FLAGS.tuned_trt_shape_file) if run_mode in precision_map.keys(): if arch in TUNED_TRT_DYNAMIC_MODELS and not os.path.exists( tuned_trt_shape_file): print( 'dynamic shape range info is saved in {}. 
After that, rerun the code'. format(tuned_trt_shape_file)) config.collect_shape_range_info(tuned_trt_shape_file) config.enable_tensorrt_engine( workspace_size=(1 << 25) * batch_size, max_batch_size=batch_size, min_subgraph_size=yml_conf['min_subgraph_size'], precision_mode=precision_map[run_mode], use_static=True, use_calib_mode=False) if yml_conf['use_dynamic_shape']: if arch in TUNED_TRT_DYNAMIC_MODELS and os.path.exists( tuned_trt_shape_file): config.enable_tuned_tensorrt_dynamic_shape(tuned_trt_shape_file, True) else: min_input_shape = { 'image': [batch_size, 3, 640, 640], 'scale_factor': [batch_size, 2] } max_input_shape = { 'image': [batch_size, 3, 1280, 1280], 'scale_factor': [batch_size, 2] } opt_input_shape = { 'image': [batch_size, 3, 1024, 1024], 'scale_factor': [batch_size, 2] } config.set_trt_dynamic_shape_info( min_input_shape, max_input_shape, opt_input_shape) # disable print log when predict config.disable_glog_info() # enable shared memory config.enable_memory_optim() # disable feed, fetch OP, needed by zero_copy_run config.switch_use_feed_fetch_ops(False) predictor = create_predictor(config) return predictor, yml_conf def create_preprocess_ops(yml_conf): preprocess_ops = [] for op_info in yml_conf['Preprocess']: new_op_info = op_info.copy() op_type = new_op_info.pop('type') preprocess_ops.append(eval(op_type)(**new_op_info)) return preprocess_ops def get_test_images(image_dir): images = set() infer_dir = os.path.abspath(image_dir) exts = ['jpg', 'jpeg', 'png', 'bmp'] exts += [ext.upper() for ext in exts] for ext in exts: images.update(glob.glob('{}/*.{}'.format(infer_dir, ext))) images = list(images) return images def create_inputs(image_files, preprocess_ops): inputs = dict() im_list, im_info_list = [], [] for im_path in image_files: im, im_info = preprocess(im_path, preprocess_ops) im_list.append(im) im_info_list.append(im_info) inputs['im_shape'] = np.stack( [e['im_shape'] for e in im_info_list], axis=0).astype('float32') inputs['scale_factor'] = np.stack( [e['scale_factor'] for e in im_info_list], axis=0).astype('float32') inputs['image'] = np.stack(im_list, axis=0).astype('float32') return inputs def measure_speed(FLAGS): predictor, yml_conf = init_predictor(FLAGS) input_names = predictor.get_input_names() preprocess_ops = create_preprocess_ops(yml_conf) image_files = get_test_images(FLAGS.image_dir) batch_size = FLAGS.batch_size warmup_iter, log_iter, total_iter = FLAGS.warmup_iter, FLAGS.log_iter, FLAGS.total_iter total_time = 0 fps = 0 for i in range(0, total_iter, batch_size): # make data ready inputs = create_inputs(image_files[i:i + batch_size], preprocess_ops) for name in input_names: input_tensor = predictor.get_input_handle(name) input_tensor.copy_from_cpu(inputs[name]) paddle.device.cuda.synchronize() # start running start_time = time.perf_counter() predictor.run() paddle.device.cuda.synchronize() if i >= warmup_iter: total_time += time.perf_counter() - start_time if (i + 1) % log_iter == 0: fps = (i + 1 - warmup_iter) / total_time print( f'Done image [{i + 1:<3}/ {total_iter}], ' f'fps: {fps:.1f} img / s, ' f'times per image: {1000 / fps:.1f} ms / img', flush=True) if (i + 1) == total_iter: fps = (i + 1 - warmup_iter) / total_time print( f'Overall fps: {fps:.1f} img / s, ' f'times per image: {1000 / fps:.1f} ms / img', flush=True) break if __name__ == '__main__': FLAGS = parse_args() if 'trt' in FLAGS.run_mode: check_version('develop') check_trt_version('8.2') else: check_version('2.4') measure_speed(FLAGS)
PaddleDetection/configs/rotate/tools/inference_benchmark.py/0
{ "file_path": "PaddleDetection/configs/rotate/tools/inference_benchmark.py", "repo_id": "PaddleDetection", "token_count": 6082 }
33
_BASE_: [ '../datasets/coco_detection.yml', '../runtime.yml', '_base_/optimizer_6x.yml', '_base_/rtdetr_r50vd.yml', '_base_/rtdetr_reader.yml', ] weights: output/rtdetr_swin_L_384_3x_coco/model_final find_unused_parameters: True log_iter: 100 snapshot_epoch: 2 pretrain_weights: https://bj.bcebos.com/v1/paddledet/models/dino_swin_large_384_4scale_3x_coco.pdparams DETR: backbone: SwinTransformer neck: HybridEncoder transformer: RTDETRTransformer detr_head: DINOHead post_process: DETRPostProcess SwinTransformer: arch: 'swin_L_384' # ['swin_T_224', 'swin_S_224', 'swin_B_224', 'swin_L_224', 'swin_B_384', 'swin_L_384'] ape: false drop_path_rate: 0.2 patch_norm: true out_indices: [1, 2, 3] HybridEncoder: hidden_dim: 256 use_encoder_idx: [2] num_encoder_layers: 6 # encoder_layer: name: TransformerLayer d_model: 256 nhead: 8 dim_feedforward: 2048 # dropout: 0. activation: 'gelu' expansion: 1.0 RTDETRTransformer: num_queries: 300 position_embed_type: sine feat_strides: [8, 16, 32] num_levels: 3 nhead: 8 num_decoder_layers: 6 dim_feedforward: 2048 # dropout: 0.0 activation: relu num_denoising: 100 label_noise_ratio: 0.5 box_noise_scale: 1.0 learnt_init_query: False DINOHead: loss: name: DINOLoss loss_coeff: {class: 1, bbox: 5, giou: 2} aux_loss: True use_vfl: True matcher: name: HungarianMatcher matcher_coeff: {class: 2, bbox: 5, giou: 2} DETRPostProcess: num_top_queries: 300 epoch: 36 LearningRate: base_lr: 0.0001 schedulers: - !PiecewiseDecay gamma: 0.1 milestones: [36] use_warmup: false OptimizerBuilder: clip_grad_by_norm: 0.1 regularizer: false optimizer: type: AdamW weight_decay: 0.0001 param_groups: - params: ['absolute_pos_embed', 'relative_position_bias_table', 'norm'] weight_decay: 0.0
PaddleDetection/configs/rtdetr/rtdetr_swin_L_384_3x_coco.yml/0
{ "file_path": "PaddleDetection/configs/rtdetr/rtdetr_swin_L_384_3x_coco.yml", "repo_id": "PaddleDetection", "token_count": 874 }
34
简体中文 | [English](README_en.md) # Dense Teacher: Dense Pseudo-Labels for Semi-supervised Object Detection ## FCOS模型库 | 模型 | 监督数据比例 | Sup Baseline | Sup Epochs (Iters) | Sup mAP<sup>val<br>0.5:0.95 | Semi mAP<sup>val<br>0.5:0.95 | Semi Epochs (Iters) | 模型下载 | 配置文件 | | :------------: | :---------: | :---------------------: | :---------------------: |:---------------------------: |:----------------------------: | :------------------: |:--------: |:----------: | | DenseTeacher-FCOS | 5% | [sup_config](../baseline/fcos_r50_fpn_2x_coco_sup005.yml) | 24 (8712) | 21.3 | **30.6** | 240 (87120) | [download](https://paddledet.bj.bcebos.com/models/denseteacher_fcos_r50_fpn_coco_semi005.pdparams) | [config](./denseteacher_fcos_r50_fpn_coco_semi005.yml) | | DenseTeacher-FCOS | 10% | [sup_config](../baseline/fcos_r50_fpn_2x_coco_sup010.yml) | 24 (17424) | 26.3 | **35.1** | 240 (174240) | [download](https://paddledet.bj.bcebos.com/models/denseteacher_fcos_r50_fpn_coco_semi010.pdparams) | [config](./denseteacher_fcos_r50_fpn_coco_semi010.yml) | | DenseTeacher-FCOS(LSJ)| 10% | [sup_config](../baseline/fcos_r50_fpn_2x_coco_sup010.yml) | 24 (17424) | 26.3 | **37.1(LSJ)** | 240 (174240) | [download](https://paddledet.bj.bcebos.com/models/denseteacher_fcos_r50_fpn_coco_semi010_lsj.pdparams) | [config](./denseteacher_fcos_r50_fpn_coco_semi010_lsj.yml) | | DenseTeacher-FCOS |100%(full)| [sup_config](../../fcos/fcos_r50_fpn_iou_multiscale_2x_coco.ymll) | 24 (175896) | 42.6 | **44.2** | 24 (175896)| [download](https://paddledet.bj.bcebos.com/models/denseteacher_fcos_r50_fpn_coco_full.pdparams) | [config](./denseteacher_fcos_r50_fpn_coco_full.yml) | **注意:** - 以上模型训练默认使用8 GPUs,监督数据总batch_size默认为16,无监督数据总batch_size默认也为16,默认初始学习率为0.01。如果改动了总batch_size,请按线性比例相应地调整学习率; - **监督数据比例**是指使用的有标签COCO数据集占 COCO train2017 全量训练集的百分比,使用的无标签COCO数据集一般也是相同比例,但具体图片和有标签数据的图片不重合; - `Semi Epochs (Iters)`表示**半监督训练**的模型的 Epochs (Iters),如果使用**自定义数据集**,需自行根据Iters换算到对应的Epochs调整,最好保证总Iters 和COCO数据集的设置较为接近; - `Sup mAP`是**只使用有监督数据训练**的模型的精度,请参照**基础检测器的配置文件** 和 [baseline](../baseline); - `Semi mAP`是**半监督训练**的模型的精度,模型下载和配置文件的链接均为**半监督模型**; - `LSJ`表示 **large-scale jittering**,表示使用更大范围的多尺度训练,可进一步提升精度,但训练速度也会变慢; - 半监督检测的配置讲解,请参照[文档](../README.md/#半监督检测配置); - `Dense Teacher`原文使用`R50-va-caffe`预训练,PaddleDetection中默认使用`R50-vb`预训练,如果使用`R50-vd`结合[SSLD](../../../docs/feature_models/SSLD_PRETRAINED_MODEL.md)的预训练模型,可进一步显著提升检测精度,同时backbone部分配置也需要做出相应更改,如: ```python pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams ResNet: depth: 50 variant: d norm_type: bn freeze_at: 0 return_idx: [1, 2, 3] num_stages: 4 lr_mult_list: [0.05, 0.05, 0.1, 0.15] ``` ## PPYOLOE+ 模型库 | 模型 | 监督数据比例 | Sup Baseline | Sup Epochs (Iters) | Sup mAP<sup>val<br>0.5:0.95 | Semi mAP<sup>val<br>0.5:0.95 | Semi Epochs (Iters) | 模型下载 | 配置文件 | | :------------: | :---------: | :---------------------: | :---------------------: |:---------------------------: |:----------------------------: | :------------------: |:--------: |:----------: | | DenseTeacher-PPYOLOE+_s | 5% | [sup_config](../baseline/ppyoloe_plus_crn_s_80e_coco_sup005.yml) | 80 (14480) | 32.8 | **34.0** | 200 (36200) | [download](https://paddledet.bj.bcebos.com/models/denseteacher_ppyoloe_plus_crn_s_coco_semi005.pdparams) | [config](./denseteacher_ppyoloe_plus_crn_s_coco_semi005.yml) | | DenseTeacher-PPYOLOE+_s | 10% | [sup_config](../baseline/ppyoloe_plus_crn_s_80e_coco_sup010.yml) | 80 (14480) | 35.3 | **37.5** | 200 (36200) | 
[download](https://paddledet.bj.bcebos.com/models/denseteacher_ppyoloe_plus_crn_s_coco_semi010.pdparams) | [config](./denseteacher_ppyoloe_plus_crn_s_coco_semi010.yml) | | DenseTeacher-PPYOLOE+_l | 5% | [sup_config](../baseline/ppyoloe_plus_crn_s_80e_coco_sup005.yml) | 80 (14480) | 42.9 | **45.4** | 200 (36200) | [download](https://paddledet.bj.bcebos.com/models/denseteacher_ppyoloe_plus_crn_l_coco_semi005.pdparams) | [config](./denseteacher_ppyoloe_plus_crn_l_coco_semi005.yml) | | DenseTeacher-PPYOLOE+_l | 10% | [sup_config](../baseline/ppyoloe_plus_crn_l_80e_coco_sup010.yml) | 80 (14480) | 45.7 | **47.4** | 200 (36200) | [download](https://paddledet.bj.bcebos.com/models/denseteacher_ppyoloe_plus_crn_l_coco_semi010.pdparams) | [config](./denseteacher_ppyoloe_plus_crn_l_coco_semi010.yml) | ## 使用说明 仅训练时必须使用半监督检测的配置文件去训练,评估、预测、部署也可以按基础检测器的配置文件去执行。 ### 训练 ```bash # 单卡训练 (不推荐,需按线性比例相应地调整学习率) CUDA_VISIBLE_DEVICES=0 python tools/train.py -c configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml --eval # 多卡训练 python -m paddle.distributed.launch --log_dir=denseteacher_fcos_semi010/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml --eval ``` ### 评估 ```bash CUDA_VISIBLE_DEVICES=0 python tools/eval.py -c configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml -o weights=output/denseteacher_fcos_r50_fpn_coco_semi010/model_final.pdparams ``` ### 预测 ```bash CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml -o weights=output/denseteacher_fcos_r50_fpn_coco_semi010/model_final.pdparams --infer_img=demo/000000014439.jpg ``` ### 部署 部署可以使用半监督检测配置文件,也可以使用基础检测器的配置文件去部署和使用。 ```bash # 导出模型 CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/semi_det/denseteacher/denseteacher_fcos_r50_fpn_coco_semi010.yml -o weights=https://paddledet.bj.bcebos.com/models/denseteacher_fcos_r50_fpn_coco_semi010.pdparams # 导出权重预测 CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/denseteacher_fcos_r50_fpn_coco_semi010 --image_file=demo/000000014439_640x640.jpg --device=GPU # 部署测速 CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/denseteacher_fcos_r50_fpn_coco_semi010 --image_file=demo/000000014439_640x640.jpg --device=GPU --run_benchmark=True # --run_mode=trt_fp16 # 导出ONNX paddle2onnx --model_dir output_inference/denseteacher_fcos_r50_fpn_coco_semi010/ --model_filename model.pdmodel --params_filename model.pdiparams --opset_version 12 --save_file denseteacher_fcos_r50_fpn_coco_semi010.onnx ``` ## 引用 ``` @article{denseteacher2022, title={Dense Teacher: Dense Pseudo-Labels for Semi-supervised Object Detection}, author={Hongyu Zhou, Zheng Ge, Songtao Liu, Weixin Mao, Zeming Li, Haiyan Yu, Jian Sun}, journal={arXiv preprint arXiv:2207.02541}, year={2022} } ```
PaddleDetection/configs/semi_det/denseteacher/README.md/0
{ "file_path": "PaddleDetection/configs/semi_det/denseteacher/README.md", "repo_id": "PaddleDetection", "token_count": 4103 }
35
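The Dense Teacher notes above fix the default recipe at 8 GPUs with a total supervised batch size of 16 and base learning rate 0.01, and ask that the learning rate be rescaled linearly whenever the total batch size changes. A small sketch of that linear-scaling rule (the 4-GPU figures are only an example):

```python
def scaled_lr(lr_default, bs_default, gpus_default, bs_new, gpus_new):
    """lr_new = lr_default * (bs_new * gpus_new) / (bs_default * gpus_default)"""
    return lr_default * (bs_new * gpus_new) / (bs_default * gpus_default)

# Default recipe above: 8 GPUs x 2 supervised images per GPU, base lr 0.01.
# Training on 4 GPUs at the same per-GPU batch size would suggest:
print(scaled_lr(0.01, 2, 8, 2, 4))  # 0.005
```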
pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams slim: QAT QAT: quant_config: { 'weight_quantize_type': 'channel_wise_abs_max', 'activation_quantize_type': 'moving_average_abs_max', 'weight_bits': 8, 'activation_bits': 8, 'dtype': 'int8', 'window_size': 10000, 'moving_rate': 0.99, 'quantizable_layer_type': ['Conv2D', 'Linear']} print_model: True PPYOLOFPN: in_channels: [160, 368] coord_conv: true conv_block_num: 0 spp: true drop_block: false
PaddleDetection/configs/slim/quant/ppyolo_mbv3_large_qat.yml/0
{ "file_path": "PaddleDetection/configs/slim/quant/ppyolo_mbv3_large_qat.yml", "repo_id": "PaddleDetection", "token_count": 215 }
36
_BASE_: [ './_base_/DOTA_sliced_500_025_detection.yml', '../runtime.yml', '../ppyoloe/_base_/optimizer_300e.yml', '../ppyoloe/_base_/ppyoloe_crn.yml', '../ppyoloe/_base_/ppyoloe_reader.yml', ] log_iter: 100 snapshot_epoch: 10 weights: output/ppyoloe_p2_crn_l_80e_sliced_DOTA_500_025/model_final pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams depth_mult: 1.0 width_mult: 1.0 CSPResNet: return_idx: [0, 1, 2, 3] use_alpha: True CustomCSPPAN: out_channels: [768, 384, 192, 64] TrainReader: batch_size: 4 EvalReader: batch_size: 1 TestReader: batch_size: 1 fuse_normalize: True epoch: 80 LearningRate: base_lr: 0.01 schedulers: - !CosineDecay max_epochs: 96 - !LinearWarmup start_factor: 0. epochs: 1 PPYOLOEHead: fpn_strides: [32, 16, 8, 4] static_assigner_epoch: -1 nms: name: MultiClassNMS nms_top_k: 10000 keep_top_k: 500 score_threshold: 0.01 nms_threshold: 0.6
PaddleDetection/configs/smalldet/ppyoloe_p2_crn_l_80e_sliced_DOTA_500_025.yml/0
{ "file_path": "PaddleDetection/configs/smalldet/ppyoloe_p2_crn_l_80e_sliced_DOTA_500_025.yml", "repo_id": "PaddleDetection", "token_count": 490 }
37
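The small-object config above evaluates on a pre-sliced DOTA variant whose name (`sliced_DOTA_500_025`) suggests 500×500 crops with roughly 25% overlap, following the same offline-cropping idea as the `prepare_data.py` commands in the rotate README earlier in this dump. A rough sketch of how such overlapping slice origins could be enumerated — illustrative only, not the actual slicing tool:

```python
def slice_origins(img_w, img_h, subsize=500, overlap_ratio=0.25):
    """Top-left corners of overlapping crops; stride = subsize * (1 - overlap)."""
    stride = int(subsize * (1 - overlap_ratio))
    xs = list(range(0, max(img_w - subsize, 0) + 1, stride))
    ys = list(range(0, max(img_h - subsize, 0) + 1, stride))
    # Add a final window so the right/bottom borders are always covered.
    if xs[-1] + subsize < img_w:
        xs.append(img_w - subsize)
    if ys[-1] + subsize < img_h:
        ys.append(img_h - subsize)
    return [(x, y) for y in ys for x in xs]

print(len(slice_origins(1200, 900, subsize=500, overlap_ratio=0.25)))  # 9 crops
```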
# SSD: Single Shot MultiBox Detector ## Model Zoo ### SSD on Pascal VOC | 骨架网络 | 网络类型 | 每张GPU图片个数 | 学习率策略 |推理时间(fps) | Box AP | 下载 | 配置文件 | | :-------------- | :------------- | :-----: | :-----: | :------------: | :-----: | :-----------------------------------------------------: | :-----: | | VGG | SSD | 8 | 240e | ---- | 77.8 | [下载链接](https://paddledet.bj.bcebos.com/models/ssd_vgg16_300_240e_voc.pdparams) | [配置文件](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ssd/ssd_vgg16_300_240e_voc.yml) | | MobileNet v1 | SSD | 32 | 120e | ---- | 73.8 | [下载链接](https://paddledet.bj.bcebos.com/models/ssd_mobilenet_v1_300_120e_voc.pdparams) | [配置文件](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ssd/ssd_mobilenet_v1_300_120e_voc.yml) | **注意:** SSD-VGG使用4GPU在总batch size为32下训练240个epoch。SSD-MobileNetv1使用2GPU在总batch size为64下训练120周期。 ## Citations ``` @article{Liu_2016, title={SSD: Single Shot MultiBox Detector}, journal={ECCV}, author={Liu, Wei and Anguelov, Dragomir and Erhan, Dumitru and Szegedy, Christian and Reed, Scott and Fu, Cheng-Yang and Berg, Alexander C.}, year={2016}, } ```
PaddleDetection/configs/ssd/README.md/0
{ "file_path": "PaddleDetection/configs/ssd/README.md", "repo_id": "PaddleDetection", "token_count": 723 }
38