| text (string, length 3 to 11.2M) | id (string, length 15 to 188) | metadata (dict) | __index_level_0__ (int64, 0 to 275) |
|---|---|---|---|
# Copyright (c) Alibaba, Inc. and its affiliates.
from swift.llm import export_main
if __name__ == '__main__':
export_main()
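# Usage note (not part of the original file, and the exact flags are an assumption): this thin
# wrapper backs the `swift export` CLI subcommand, so it is normally invoked from the shell
# (e.g. `swift export --help`) rather than imported directly.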
| swift/swift/cli/export.py/0 | {
"file_path": "swift/swift/cli/export.py",
"repo_id": "swift",
"token_count": 44
} | 216 |
# Copyright (c) Alibaba, Inc. and its affiliates.
import hashlib
import os
from datetime import datetime
from pathlib import Path
from typing import Optional
from swift.hub.constants import (DEFAULT_MODELSCOPE_DOMAIN, DEFAULT_MODELSCOPE_GROUP, MODEL_ID_SEPARATOR,
MODELSCOPE_SDK_DEBUG, MODELSCOPE_URL_SCHEME)
from swift.hub.errors import FileIntegrityError
from swift.utils.logger import get_logger
logger = get_logger()
def get_default_cache_dir():
"""
default base dir: '~/.cache/modelscope'
"""
default_cache_dir = Path.home().joinpath('.cache', 'modelscope')
return default_cache_dir
def model_id_to_group_owner_name(model_id):
if MODEL_ID_SEPARATOR in model_id:
group_or_owner = model_id.split(MODEL_ID_SEPARATOR)[0]
name = model_id.split(MODEL_ID_SEPARATOR)[1]
else:
group_or_owner = DEFAULT_MODELSCOPE_GROUP
name = model_id
return group_or_owner, name
def get_cache_dir(model_id: Optional[str] = None):
"""cache dir precedence:
    MODELSCOPE_CACHE environment variable > '~/.cache/modelscope/hub'
Args:
model_id (str, optional): The model id.
Returns:
str: the model_id dir if model_id not None, otherwise cache root dir.
"""
default_cache_dir = get_default_cache_dir()
base_path = os.getenv('MODELSCOPE_CACHE', os.path.join(default_cache_dir, 'hub'))
return base_path if model_id is None else os.path.join(base_path, model_id + '/')
def get_release_datetime():
if MODELSCOPE_SDK_DEBUG in os.environ:
rt = int(round(datetime.now().timestamp()))
else:
from swift import version
rt = int(round(datetime.strptime(version.__release_datetime__, '%Y-%m-%d %H:%M:%S').timestamp()))
return rt
def get_endpoint():
modelscope_domain = os.getenv('MODELSCOPE_DOMAIN', DEFAULT_MODELSCOPE_DOMAIN)
return MODELSCOPE_URL_SCHEME + modelscope_domain
def compute_hash(file_path):
BUFFER_SIZE = 1024 * 64 # 64k buffer size
sha256_hash = hashlib.sha256()
with open(file_path, 'rb') as f:
while True:
data = f.read(BUFFER_SIZE)
if not data:
break
sha256_hash.update(data)
return sha256_hash.hexdigest()
def file_integrity_validation(file_path, expected_sha256):
"""Validate the file hash is expected, if not, delete the file
Args:
file_path (str): The file to validate
expected_sha256 (str): The expected sha256 hash
Raises:
FileIntegrityError: If file_path hash is not expected.
"""
file_sha256 = compute_hash(file_path)
    if file_sha256 != expected_sha256:
os.remove(file_path)
msg = 'File %s integrity check failed, the download may be incomplete, please try again.' % file_path
logger.error(msg)
raise FileIntegrityError(msg)
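# --- A minimal usage sketch (not part of the original module) combining the helpers above:
# hash a freshly written temporary file and validate it. The file contents are illustrative.
if __name__ == '__main__':
    import tempfile

    with tempfile.NamedTemporaryFile('wb', suffix='.bin', delete=False) as f:
        f.write(b'hello swift')
        tmp_path = f.name
    expected_sha256 = compute_hash(tmp_path)
    file_integrity_validation(tmp_path, expected_sha256)  # passes silently when the hashes match
    logger.info(f'{tmp_path} passed integrity check; cache dir: {get_cache_dir()}')
    os.remove(tmp_path)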
| swift/swift/hub/utils/utils.py/0 | {
"file_path": "swift/swift/hub/utils/utils.py",
"repo_id": "swift",
"token_count": 1182
} | 217 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from typing import Dict, List, Optional, Tuple, Union
from swift.utils import get_logger
from swift.utils.utils import split_str_parts_by
logger = get_logger()
REACT_PROMPT = """Answer the following questions as best you can. You have access to the following tools:
{tool_list}
Use the following format:
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can be repeated zero or more times)
Final Answer: the final answer to the original input question
Begin!
"""
REACT_ZH_PROMPT = """尽你所能回答以下问题。你拥有如下工具:
{tool_list}
使用以下格式回答:
Thought: 思考你应该做什么
Action: 工具的名称,必须是[{tool_names}]之一
Action Input: 工具的输入
Observation: 工具返回的结果
... (Thought/Action/Action Input/Observation的过程可以重复零次或多次)
Final Answer: 对输入问题的最终答案
开始!
"""
TOOLBENCH_PROMPT = '''You can use many tools(functions) to do the following task.
First I will give you the task description, and your task start.
At each step, you need to give your thought to analyze the status now and what to do next, \
with a function call to actually excute your step. Your output should follow this format:
Thought:
Action:
Action Input:
After the call, you will get the call result, and you are now in a new state.
Then you will analyze your status now, then decide what to do next...
After many (Thought-call) pairs, you finally perform the task, then you can give your finial answer.
Remember:
1.the state change is irreversible, you can't go back to one of the former state, if you want to restart the task, \
say \"I give up and restart\".
2.All the thought is short, at most in 5 sentence.
3.You can do more then one trys, so if your plan is to continusly try some conditions, \
you can do one of the conditions per try.
Let's Begin!
Task description: You should use functions to help handle the real time user querys. Remember:
1.ALWAYS call \"Finish\" function at the end of the task. And the final answer should contain enough information \
to show to the user,If you can't handle the task, \
or you find that function calls always fail(the function is not valid now), \
use function Finish->give_up_and_restart.
2.Do not use origin tool names, use only subfunctions' names.
Specifically, you have access to the following APIs: {tool_list}'''
def calculate_loss_scale(query: str,
response: str,
use_loss_scale=False,
response_loss_scale_map: Optional[Dict[str, list]] = None,
query_loss_scale_map: Optional[Dict[str, list]] = None) -> Tuple[List[str], List[float]]:
"""Calculate the loss scale by splitting the agent response.
This algorithm comes from paper: https://arxiv.org/pdf/2309.00986.pdf
Agent response format:
```text
Thought: you should always think about what to do
Action: the action to take, should be one of the above tools[fire_recognition,
fire_alert, call_police, call_fireman]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can be repeated zero or more times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
```
    Args:
        query: The query text.
        response: The response text.
        use_loss_scale: Whether to use weighted loss. If enabled, some parts of the loss are up-weighted to improve performance.
        response_loss_scale_map: A mapping from response keywords to their loss-scale values.
        query_loss_scale_map: A mapping from query keywords to their loss-scale values.
Returns:
A tuple of agent response parts and their weights.
"""
if use_loss_scale:
# query loss scale map
if query_loss_scale_map is not None:
for key in query_loss_scale_map.keys():
if key in query:
if isinstance(query_loss_scale_map[key], (float, int)):
query_loss_scale_map[key] = [query_loss_scale_map[key]]
loss_scale_value = query_loss_scale_map[key][0]
return [response], [float(loss_scale_value)]
delimiters = list(k for k in response_loss_scale_map.keys() if len(response_loss_scale_map[k]) == 2)
agent_parts = split_str_parts_by(response, delimiters)
regex_delimiters = {k: v for k, v in response_loss_scale_map.items() if len(v) == 1}
if len(regex_delimiters):
split_parts_by_regex(agent_parts, regex_delimiters)
weights = []
agent_content = []
for c in agent_parts:
if isinstance(c['key'], (float, int)):
weights += [c['key']]
agent_content.append(c['content'])
else:
if c['key'] in response_loss_scale_map:
weights += [response_loss_scale_map[c['key']][0]]
weights += [response_loss_scale_map[c['key']][1]]
agent_content.append(c['key'])
agent_content.append(c['content'])
else:
weights += [1.0]
agent_content.append(c['content'])
return agent_content, weights
else:
return [response], [1.0]
def split_action_action_input(response: str) -> Tuple[Optional[str], Optional[str]]:
agent_keyword = [
'action:', 'Action:', 'ACTION:', 'action input:', 'Action Input:', 'Action input:', 'ACTION INPUT:', 'Thought:',
'Final Answer:', 'Observation:'
]
agent_parts = split_str_parts_by(response, agent_keyword)
action = None
action_input = None
for c in agent_parts:
if c['key'].lower() == 'action:':
action = c['content']
elif c['key'].lower() == 'action input:':
action_input = c['content']
if action:
action = action.strip().replace('\n', '')
    if action_input:
        action_input = action_input.strip().replace('\n', '')
return action, action_input
def split_parts_by_regex(text_list: list, regex_delimiters: Dict[str, List[float]]) -> None:
import re
compiled_patterns = [(re.compile(pattern), scale) for pattern, scale in regex_delimiters.items()]
for i in range(len(text_list) - 1, -1, -1):
item = text_list[i]
if item.get('key') == '':
res_text = item['content']
last_idx = 0
segments = []
for pattern, scale in compiled_patterns:
matches = list(re.finditer(pattern, res_text))
for match in matches:
if match.start() > last_idx:
segments.append({'key': '', 'content': res_text[last_idx:match.start()]})
segments.append({'key': scale[0], 'content': match.group(0)})
last_idx = match.end()
if last_idx < len(res_text):
                segments.append({'key': '', 'content': res_text[last_idx:]})
if segments:
text_list[i:i + 1] = segments
def get_tools_prompt(TOOLS: List[Dict[str, Union[str, dict]]], prompt_format: str = 'react_en') -> Optional[str]:
tool_descs = []
tool_names = []
for info in TOOLS: # info: Dict[str, Union[str, dict]]
try:
if 'function' in info:
info = info['function']
tool_names.append(info['name'])
tool_descs.append(str(info)) # info: dict
except KeyError:
            print('invalid tools format, please check '
                  'https://github.com/modelscope/swift/blob/main/docs/source_en/LLM/Agent-deployment-best-practice.md')
return None
tool_descs = '\n\n'.join(tool_descs)
tool_names = ','.join(tool_names)
if prompt_format == 'react_en':
return REACT_PROMPT.format(tool_list=tool_descs, tool_names=tool_names)
elif prompt_format == 'react_zh':
return REACT_ZH_PROMPT.format(tool_list=tool_descs, tool_names=tool_names)
return TOOLBENCH_PROMPT.format(tool_list=tool_descs)
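# --- A minimal usage sketch (not part of the original module). The tool schema below is
# illustrative and follows the OpenAI-style {'type': 'function', 'function': {...}} layout
# that get_tools_prompt unwraps via the 'function' key.
if __name__ == '__main__':
    tools = [{
        'type': 'function',
        'function': {
            'name': 'get_weather',
            'description': 'Query the current weather for a city.',
            'parameters': {'type': 'object', 'properties': {'city': {'type': 'string'}}},
        },
    }]
    system_prompt = get_tools_prompt(tools, prompt_format='react_en')
    print(system_prompt)  # REACT_PROMPT filled with the tool description and tool name

    # Parsing a (hypothetical) model response back into an action and its input:
    response = ('Thought: I should look up the weather.\n'
                'Action: get_weather\n'
                'Action Input: {"city": "Beijing"}')
    action, action_input = split_action_action_input(response)
    print(action, action_input)  # e.g. get_weather {"city": "Beijing"}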
| swift/swift/llm/agent/utils.py/0 | {
"file_path": "swift/swift/llm/agent/utils.py",
"repo_id": "swift",
"token_count": 3439
} | 218 |
# Copyright (c) Alibaba, Inc. and its affiliates.
import time
import uuid
from dataclasses import dataclass, field
from typing import Dict, List, Literal, Optional, Union
def random_uuid() -> str:
return str(uuid.uuid4().hex)
@dataclass
class Model:
id: str # model_type
is_chat: Optional[bool] = None # chat model or generation model
is_multimodal: bool = False
object: str = 'model'
created: int = field(default_factory=lambda: int(time.time()))
owned_by: str = 'swift'
@dataclass
class ModelList:
data: List[Model]
object: str = 'list'
@dataclass
class XRequestConfig:
"""NOTE: The following behavior is inconsistent with the OpenAI API.
Default values for OpenAI:
temperature = 1.
top_k = -1
top_p = 1.
repetition_penalty = 1.
"""
max_tokens: Optional[int] = None # None: max_model_len - num_tokens
# None: use deploy_args
temperature: Optional[float] = None
top_p: Optional[float] = None
n: int = 1
seed: Optional[int] = None
stop: Optional[List[str]] = None
stream: bool = False
best_of: Optional[int] = None
presence_penalty: float = 0.
frequency_penalty: float = 0.
length_penalty: float = 1.
# additional
num_beams: int = 1
# None: use deploy_args
top_k: Optional[int] = None
repetition_penalty: Optional[float] = None
@dataclass
class CompletionRequestMixin:
model: str
prompt: str
images: List[str] = field(default_factory=list)
@dataclass
class ChatCompletionRequestMixin:
model: str
messages: List[Dict[str, str]]
tools: Optional[List[Dict[str, Union[str, Dict]]]] = None
tool_choice: Optional[Union[str, Dict]] = 'auto'
images: List[str] = field(default_factory=list)
@dataclass
class CompletionRequest(XRequestConfig, CompletionRequestMixin):
pass
@dataclass
class ChatCompletionRequest(XRequestConfig, ChatCompletionRequestMixin):
pass
@dataclass
class UsageInfo:
prompt_tokens: int = 0
completion_tokens: int = 0
total_tokens: int = 0
@dataclass
class Function:
arguments: Optional[str] = None
name: str = ''
@dataclass
class ChatCompletionMessageToolCall:
id: str
function: Function
type: str = 'function'
@dataclass
class ChatMessage:
role: Literal['system', 'user', 'assistant']
content: str
tool_calls: Optional[ChatCompletionMessageToolCall] = None
@dataclass
class ChatCompletionResponseChoice:
index: int
message: ChatMessage
finish_reason: Literal['stop', 'length', None] # None: for infer_backend='pt'
@dataclass
class CompletionResponseChoice:
index: int
text: str
finish_reason: Literal['stop', 'length', None] # None: for infer_backend='pt'
@dataclass
class ChatCompletionResponse:
model: str
choices: List[ChatCompletionResponseChoice]
usage: UsageInfo
id: str = field(default_factory=lambda: f'chatcmpl-{random_uuid()}')
object: str = 'chat.completion'
created: int = field(default_factory=lambda: int(time.time()))
@dataclass
class CompletionResponse:
model: str
choices: List[CompletionResponseChoice]
usage: UsageInfo
id: str = field(default_factory=lambda: f'cmpl-{random_uuid()}')
object: str = 'text_completion'
created: int = field(default_factory=lambda: int(time.time()))
@dataclass
class DeltaMessage:
role: Literal['system', 'user', 'assistant']
content: str
tool_calls: Optional[ChatCompletionMessageToolCall] = None
@dataclass
class ChatCompletionResponseStreamChoice:
index: int
delta: DeltaMessage
finish_reason: Literal['stop', 'length', None]
@dataclass
class ChatCompletionStreamResponse:
model: str
choices: List[ChatCompletionResponseStreamChoice]
usage: UsageInfo
id: str = field(default_factory=lambda: f'chatcmpl-{random_uuid()}')
object: str = 'chat.completion.chunk'
created: int = field(default_factory=lambda: int(time.time()))
@dataclass
class CompletionResponseStreamChoice:
index: int
text: str
finish_reason: Literal['stop', 'length', None]
@dataclass
class CompletionStreamResponse:
model: str
choices: List[CompletionResponseStreamChoice]
usage: UsageInfo
id: str = field(default_factory=lambda: f'cmpl-{random_uuid()}')
object: str = 'text_completion.chunk'
created: int = field(default_factory=lambda: int(time.time()))
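# --- A minimal sketch (not part of the original module) of building an OpenAI-style request and
# response with the dataclasses above; the model name, messages and token counts are illustrative.
if __name__ == '__main__':
    from dataclasses import asdict

    request = ChatCompletionRequest(model='qwen-7b-chat', messages=[{'role': 'user', 'content': 'Hello!'}])
    response = ChatCompletionResponse(
        model=request.model,
        choices=[
            ChatCompletionResponseChoice(
                index=0,
                message=ChatMessage(role='assistant', content='Hi, how can I help you?'),
                finish_reason='stop')
        ],
        usage=UsageInfo(prompt_tokens=5, completion_tokens=8, total_tokens=13))
    print(asdict(request)['temperature'])  # None -> fall back to the deploy-time default
    print(asdict(response)['id'])          # e.g. 'chatcmpl-<32 hex chars>'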
| swift/swift/llm/utils/protocol.py/0 | {
"file_path": "swift/swift/llm/utils/protocol.py",
"repo_id": "swift",
"token_count": 1633
} | 219 |
from typing import Any, Dict, List, Literal, Tuple, Union
import torch
from torch import nn
from transformers import PreTrainedModel, trainer
from trl import DPOTrainer as HFDPOTrainer
from swift.llm.utils.template import Template
from swift.llm.utils.utils import sort_by_max_length
from swift.utils import get_logger
from .callback import DefaultFlowCallbackNew, PrinterCallbackNew, ProgressCallbackNew
from .mixin import PushToMsHubMixin, SwiftMixin
from .utils import build_tokenized_answer, concat_template
logger = get_logger()
class DPOTrainer(PushToMsHubMixin, SwiftMixin, HFDPOTrainer):
def __init__(self, *args, template: Template, sft_beta=0., test_oom_error=False, **kwargs):
self.template = template
self.sft_beta = sft_beta
super().__init__(*args, **kwargs)
train_ds_info = self.stat_dataset(self.train_dataset)
val_ds_info = self.stat_dataset(self.eval_dataset)
self.dataset_info = {'train_dataset': train_ds_info, 'val_dataset': val_ds_info}
if test_oom_error:
self.train_dataset = sort_by_max_length(self.train_dataset, 20000)
# performance
self.perf: Dict[str, Any] = {
'gen_time': 0.,
'gen_len': 0,
'memory': {},
'model': self.model.get_trainable_parameters() if hasattr(self.model, 'get_trainable_parameters') else None,
}
def train(self, *args, **kwargs) -> torch.Tensor:
res = super().train(*args, **kwargs)
for i in range(torch.cuda.device_count()):
self.perf['memory'][f'cuda:{i}'] = f'{torch.cuda.max_memory_reserved(i)/1024/1024/1024:.2f}GiB'
return res
def tokenize_row(self, feature, model: Union[PreTrainedModel, nn.Module] = None) -> Dict:
batch = {}
if not self.is_encoder_decoder:
prompt, chosen, rejected, loss_scale = concat_template(feature, self.template)
prompt_tokens, _, _, _ = self.template._encode_context_list(prompt, loss_scale)
prompt_tokens = {
'input_ids': prompt_tokens,
'attention_mask': [1] * len(prompt_tokens),
}
prompt_tokens = {f'prompt_{k}': v for k, v in prompt_tokens.items()}
if not isinstance(chosen, str):
                raise ValueError(f'chosen should be a str but got {type(chosen)}')
chosen_tokens = build_tokenized_answer(chosen, self.template)
# Avoid tokenizing the prompt repeatedly.
chosen_tokens.update(prompt_tokens)
if not isinstance(rejected, str):
                raise ValueError(f'rejected should be a str but got {type(rejected)}')
rejected_tokens = build_tokenized_answer(rejected, self.template)
rejected_tokens.update(prompt_tokens)
longer_response_length = max(len(chosen_tokens['input_ids']), len(rejected_tokens['input_ids']))
# if combined sequence is too long, truncate the prompt
for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
if len(answer_tokens['prompt_input_ids']) + longer_response_length > self.max_length:
if self.truncation_mode == 'keep_start':
for k in ['prompt_input_ids', 'prompt_attention_mask']:
answer_tokens[k] = answer_tokens[k][:self.max_prompt_length]
elif self.truncation_mode == 'keep_end':
for k in ['prompt_input_ids', 'prompt_attention_mask']:
answer_tokens[k] = answer_tokens[k][-self.max_prompt_length:]
else:
raise ValueError(f'Unknown truncation mode: {self.truncation_mode}')
# if that's still too long, truncate the response
for answer_tokens in [chosen_tokens, rejected_tokens]:
if len(answer_tokens['prompt_input_ids']) + longer_response_length > self.max_length:
for k in ['input_ids', 'attention_mask']:
answer_tokens[k] = answer_tokens[k][:self.max_length - self.max_prompt_length]
# Create labels
chosen_sequence_tokens = {
k: chosen_tokens[f'prompt_{k}'] + chosen_tokens[k]
for k in ['input_ids', 'attention_mask']
}
rejected_sequence_tokens = {
k: rejected_tokens[f'prompt_{k}'] + rejected_tokens[k]
for k in ['input_ids', 'attention_mask']
}
chosen_sequence_tokens['labels'] = chosen_sequence_tokens['input_ids'][:]
_paddings = [self.label_pad_token_id] * len(chosen_tokens['prompt_input_ids'])
chosen_sequence_tokens['labels'][:len(chosen_tokens['prompt_input_ids'])] = _paddings
rejected_sequence_tokens['labels'] = rejected_sequence_tokens['input_ids'][:]
_paddings = [self.label_pad_token_id] * len(rejected_tokens['prompt_input_ids'])
rejected_sequence_tokens['labels'][:len(rejected_tokens['prompt_input_ids'])] = _paddings
for k, toks in {
'chosen_': chosen_sequence_tokens,
'rejected_': rejected_sequence_tokens,
'': prompt_tokens,
}.items():
for type_key, tokens in toks.items():
if type_key == 'token_type_ids':
continue
batch[f'{k}{type_key}'] = tokens
else:
# encoder-decoder
batch = super().tokenize_row(feature, model)
return batch
@staticmethod
def stat_dataset(llm_dataset) -> Any:
_token_len = []
from datasets import Dataset as HfDataset
from swift.utils.np_utils import stat_array
if isinstance(llm_dataset, HfDataset):
chosen = llm_dataset['chosen_input_ids']
rejected = llm_dataset['rejected_input_ids']
for cc, rr in zip(chosen, rejected):
_token_len.append(max(len(cc), len(rr)))
else:
for d in llm_dataset:
_token_len.append(max(len(d['chosen_input_ids']), len(d['rejected_input_ids'])))
_, stat_str = stat_array(_token_len)
logger.info(f'Dataset Token Length: {stat_str}')
return stat_str
def get_batch_loss_metrics(
self,
model,
batch: Dict[str, Union[List, torch.LongTensor]],
train_eval: Literal['train', 'eval'] = 'train',
):
"""Compute the DPO loss and other metrics for the given batch of inputs for train or test."""
metrics = {}
(
policy_chosen_logps,
policy_rejected_logps,
policy_chosen_logits,
policy_rejected_logits,
concatenated_batch,
) = self.concatenated_forward(model, batch)
# if reference_chosen_logps and reference_rejected_logps in batch use them, otherwise use the reference model
if 'reference_chosen_logps' in batch and 'reference_rejected_logps' in batch:
reference_chosen_logps = batch['reference_chosen_logps']
reference_rejected_logps = batch['reference_rejected_logps']
else:
with torch.no_grad():
if self.ref_model is None:
with self.null_ref_context():
(
reference_chosen_logps,
reference_rejected_logps,
_,
_,
_,
) = self.concatenated_forward(self.model, batch)
else:
(
reference_chosen_logps,
reference_rejected_logps,
_,
_,
_,
) = self.concatenated_forward(self.ref_model, batch)
losses, chosen_rewards, rejected_rewards = self.dpo_loss(
policy_chosen_logps,
policy_rejected_logps,
reference_chosen_logps,
reference_rejected_logps,
)
if self.sft_beta > 0.:
chosen_labels = concatenated_batch['concatenated_labels'][:batch['chosen_labels'].shape[0]]
sft_loss, size_completion = self.get_batch_logps(policy_chosen_logits, chosen_labels)
sft_loss = -sft_loss / size_completion
if losses.shape[0] == 2 * sft_loss.shape[0]:
sft_loss = sft_loss.repeat(2, *sft_loss.shape[1:])
losses = (1 - self.sft_beta) * losses + self.sft_beta * sft_loss
reward_accuracies = (chosen_rewards > rejected_rewards).float()
prefix = 'eval_' if train_eval == 'eval' else ''
metrics[f'{prefix}rewards/chosen'] = chosen_rewards.mean().cpu()
metrics[f'{prefix}rewards/rejected'] = rejected_rewards.mean().cpu()
metrics[f'{prefix}rewards/accuracies'] = reward_accuracies.mean().cpu()
metrics[f'{prefix}rewards/margins'] = (chosen_rewards - rejected_rewards).mean().cpu()
metrics[f'{prefix}logps/rejected'] = policy_rejected_logps.detach().mean().cpu()
metrics[f'{prefix}logps/chosen'] = policy_chosen_logps.detach().mean().cpu()
        metrics[f'{prefix}logps/ref_rejected'] = reference_rejected_logps.detach().mean().cpu()
metrics[f'{prefix}logps/ref_chosen'] = reference_chosen_logps.detach().mean().cpu()
metrics[f'{prefix}logits/rejected'] = policy_rejected_logits.detach().mean().cpu()
metrics[f'{prefix}logits/chosen'] = policy_chosen_logits.detach().mean().cpu()
return losses.mean(), metrics
def concatenated_forward(
self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, Dict[str, torch.LongTensor]]:
"""Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.
We do this to avoid doing two forward passes, because it's faster for FSDP.
"""
concatenated_batch = self.concatenated_inputs(
batch,
is_encoder_decoder=self.is_encoder_decoder,
label_pad_token_id=self.label_pad_token_id,
padding_value=self.padding_value,
device=self.accelerator.device,
)
len_chosen = batch['chosen_labels'].shape[0]
model_kwargs = ({
'labels': concatenated_batch['concatenated_labels'],
'decoder_input_ids': concatenated_batch.pop('concatenated_decoder_input_ids', None),
} if self.is_encoder_decoder else {})
all_logits = model(
concatenated_batch['concatenated_input_ids'],
attention_mask=concatenated_batch['concatenated_attention_mask'],
**model_kwargs,
).logits
all_logps, _ = self.get_batch_logps(
all_logits,
concatenated_batch['concatenated_labels'],
is_encoder_decoder=self.is_encoder_decoder,
label_pad_token_id=self.label_pad_token_id,
)
chosen_logps = all_logps[:len_chosen]
rejected_logps = all_logps[len_chosen:]
chosen_logits = all_logits[:len_chosen]
rejected_logits = all_logits[len_chosen:]
return chosen_logps, rejected_logps, chosen_logits, rejected_logits, concatenated_batch
# monkey patching
trainer.DEFAULT_PROGRESS_CALLBACK = ProgressCallbackNew
trainer.DEFAULT_CALLBACKS = [DefaultFlowCallbackNew]
trainer.PrinterCallback = PrinterCallbackNew
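# --- A small, self-contained illustration (not part of the original module) of the label masking
# performed in `tokenize_row`: prompt positions are replaced with `label_pad_token_id` so that only
# response tokens contribute to the loss. The token ids below are made up for demonstration.
def _demo_mask_prompt_labels(prompt_input_ids, response_input_ids, label_pad_token_id=-100):
    input_ids = prompt_input_ids + response_input_ids
    labels = input_ids[:]
    labels[:len(prompt_input_ids)] = [label_pad_token_id] * len(prompt_input_ids)
    return input_ids, labels
# _demo_mask_prompt_labels([1, 2, 3], [4, 5]) -> ([1, 2, 3, 4, 5], [-100, -100, -100, 4, 5])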
| swift/swift/trainers/dpo_trainer.py/0 | {
"file_path": "swift/swift/trainers/dpo_trainer.py",
"repo_id": "swift",
"token_count": 5676
} | 220 |
# Copyright (c) Alibaba, Inc. and its affiliates.
import time
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from peft import PeftModel
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss
from transformers import Seq2SeqTrainer as HfSeq2SeqTrainer
from transformers import Trainer as HfTrainer
from transformers import trainer
from transformers.modeling_utils import unwrap_model
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from transformers.utils import is_peft_available
from swift.torchacc_utils import ta_eval_dataloader, ta_test_dataloader, ta_train_dataloader, ta_trim_graph
from swift.utils import use_torchacc
from .callback import DefaultFlowCallbackNew, PrinterCallbackNew, ProgressCallbackNew
from .mixin import PushToMsHubMixin, SwiftMixin
try:
from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
except ImportError:
from transformers.deepspeed import is_deepspeed_zero3_enabled
class Trainer(PushToMsHubMixin, SwiftMixin, HfTrainer):
pass
class Seq2SeqTrainer(PushToMsHubMixin, SwiftMixin, HfSeq2SeqTrainer):
def __init__(self, *args, **kwargs):
self.sequence_parallel_size = kwargs.pop('sequence_parallel_size', 1)
super().__init__(*args, **kwargs)
# performance
if not hasattr(self, 'perf'):
self.perf = {}
self.perf.update({
'gen_time': 0.,
'gen_len': 0,
})
self._acc = torch.tensor(0.).to(self.args.device)
if self.sequence_parallel_size > 1:
from swift.trainers.xtuner import init_sequence_parallel_xtuner
init_sequence_parallel_xtuner(self.sequence_parallel_size)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
**gen_kwargs,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
if not self.args.predict_with_generate or prediction_loss_only:
return super().prediction_step(
model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys)
inputs.pop('loss_scale', None)
has_labels = 'labels' in inputs
inputs = self._prepare_inputs(inputs)
# XXX: adapt synced_gpus for fairscale as well
# Priority (handled in generate):
# gen_kwargs > model.generation_config > default GenerationConfig()
if len(gen_kwargs) == 0 and hasattr(self, '_gen_kwargs'):
gen_kwargs = self._gen_kwargs.copy()
if hasattr(self.model, 'generation_config'):
gen_kwargs.update(self.model.generation_config.to_dict())
if gen_kwargs.get('max_length') is None and gen_kwargs.get('max_new_tokens') is None:
gen_kwargs['max_length'] = self.model.config.max_length
gen_kwargs['num_beams'] = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.model.config.num_beams)
default_synced_gpus = True if is_deepspeed_zero3_enabled() else False
gen_kwargs['synced_gpus'] = (
gen_kwargs['synced_gpus'] if gen_kwargs.get('synced_gpus') is not None else default_synced_gpus)
# If the `decoder_input_ids` was created from `labels`, evict the former, so that the model can freely generate
# (otherwise, it would continue generating from the padded `decoder_input_ids`)
if ('labels' in inputs and 'decoder_input_ids' in inputs
and inputs['labels'].shape == inputs['decoder_input_ids'].shape):
inputs = {k: v for k, v in inputs.items() if k != 'decoder_input_ids'}
gen_kwargs['pad_token_id'] = self.tokenizer.pad_token_id
gen_kwargs['eos_token_id'] = self.tokenizer.eos_token_id
# fix generate warning
if 'max_length' in gen_kwargs and 'max_new_tokens' in gen_kwargs and gen_kwargs['max_new_tokens'] is not None:
gen_kwargs.pop('max_length')
gen_time = time.time()
generate_inputs = inputs.copy()
if has_labels:
_labels = inputs['labels'][0]
n_mask = 0
for i in range(len(_labels)):
if _labels[i] != -100:
n_mask = i
break
for k in ['input_ids', 'attention_mask']:
generate_inputs[k] = generate_inputs[k][:, :n_mask]
generate_inputs['labels'] = generate_inputs['labels'][:, n_mask:]
generated_tokens = self.model.generate(**generate_inputs, **gen_kwargs)
gen_time = time.time() - gen_time
if hasattr(self.model, 'encoder') and self.model.encoder.main_input_name != self.model.main_input_name:
generation_inputs = generate_inputs[self.model.encoder.main_input_name]
else:
generation_inputs = generate_inputs[self.model.main_input_name]
generated_tokens = generated_tokens[:, generation_inputs.shape[1]:]
gen_len = len(generated_tokens[0])
self.perf['gen_time'] = self.perf['gen_time'] + gen_time
self.perf['gen_len'] = self.perf['gen_len'] + gen_len
# in case the batch is shorter than max length, the output should be padded
if gen_kwargs.get('max_length') is not None and generated_tokens.shape[-1] < gen_kwargs['max_length']:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_length'])
elif gen_kwargs.get('max_new_tokens') is not None and generated_tokens.shape[-1] < (gen_kwargs['max_new_tokens']
+ 1):
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_new_tokens'] + 1)
with torch.no_grad():
if has_labels:
with self.compute_loss_context_manager():
outputs = model(**inputs)
if self.label_smoother is not None:
loss = self.label_smoother(outputs, inputs['labels']).mean().detach()
else:
loss = (outputs['loss'] if isinstance(outputs, dict) else outputs[0]).mean().detach()
else:
loss = None
if self.args.prediction_loss_only:
return loss, None, None
if has_labels:
labels = generate_inputs['labels']
if gen_kwargs.get('max_length') is not None and labels.shape[-1] < gen_kwargs['max_length']:
labels = self._pad_tensors_to_max_len(labels, gen_kwargs['max_length'])
elif gen_kwargs.get('max_new_tokens') is not None and labels.shape[-1] < (gen_kwargs['max_new_tokens'] + 1):
labels = self._pad_tensors_to_max_len(labels, (gen_kwargs['max_new_tokens'] + 1))
else:
labels = None
return loss, generated_tokens, labels
@staticmethod
def compute_scaled_loss(labels: torch.Tensor, lm_logits: torch.Tensor, loss_scale: torch.Tensor) -> torch.Tensor:
device = lm_logits.device
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :]
shift_labels = labels[..., 1:]
shift_scale = loss_scale[..., 1:]
# Save memory
masks = shift_labels != -100
shift_logits = shift_logits[masks]
shift_labels = shift_labels[masks].to(device)
shift_scale = shift_scale[masks].to(device)
# Flatten the tokens
loss_fct = CrossEntropyLoss(reduction='none')
loss = loss_fct(shift_logits, shift_labels)
loss = shift_scale * loss
return loss.mean()
def compute_loss(self, model, inputs, return_outputs=None):
if not hasattr(self, '_custom_metrics'):
self._custom_metrics = {}
labels = None
loss_scale = None
if 'loss_scale' in inputs:
labels = inputs.pop('labels')
loss_scale = inputs.pop('loss_scale')
if self.label_smoother is not None and 'labels' in inputs:
labels = inputs.pop('labels')
outputs = model(**inputs)
if loss_scale is not None:
outputs['loss'] = self.compute_scaled_loss(labels, outputs.logits, loss_scale)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None and loss_scale is None:
unwrapped_model = unwrap_model(model)
if is_peft_available() and isinstance(unwrapped_model, PeftModel):
model_name = unwrapped_model.base_model.model._get_name()
else:
model_name = unwrapped_model._get_name()
if model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
loss = self.label_smoother(outputs, labels, shift_labels=True)
else:
loss = self.label_smoother(outputs, labels)
else:
loss = outputs['loss'] if isinstance(outputs, dict) else outputs[0]
if self.sequence_parallel_size > 1:
from swift.trainers.xtuner import reduce_xtuner_sequence_parallel_loss
loss = reduce_xtuner_sequence_parallel_loss(loss, labels)
if labels is None:
labels = inputs['labels']
preds = outputs.logits.argmax(dim=2)[..., :-1]
labels = labels[..., 1:]
masks = labels != -100
acc_strategy = getattr(self.args, 'acc_strategy', 'token')
acc: Optional[Tensor] = None
if self.state.global_step % self.sft_args.acc_steps == 0:
if preds.shape != labels.shape:
pass
elif acc_strategy == 'sentence':
acc_list = []
for i, m in enumerate(masks):
acc_list.append(torch.all(preds[i, m] == labels[i, m]).to(torch.int64).item())
acc = torch.tensor(acc_list, device=preds.device).float().mean()
else:
if use_torchacc():
ta_trim_graph()
preds = preds.to('cpu')
masks = masks.to('cpu')
labels = labels.to('cpu')
acc = (torch.masked_select(preds, masks) == torch.masked_select(labels, masks)).float().mean()
if model.training and acc is not None:
if 'acc' not in self._custom_metrics:
self._custom_metrics['acc'] = self._acc
self._custom_metrics['acc'] = self._custom_metrics['acc'] + acc / self.args.gradient_accumulation_steps
return (loss, outputs) if return_outputs else loss
def get_train_dataloader(self):
if self.sequence_parallel_size > 1:
from swift.trainers.xtuner import get_xtuner_train_dataloader
return get_xtuner_train_dataloader(self)
elif use_torchacc():
if trainer.is_datasets_available():
import datasets
if self.train_dataset is None:
raise ValueError('Trainer: training requires a train_dataset.')
train_dataset = self.train_dataset
data_collator = self.data_collator
if trainer.is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
train_dataset = self._remove_unused_columns(train_dataset, description='training')
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description='training')
return ta_train_dataloader(train_dataset, data_collator, self._get_train_sampler(), self.args,
self._train_batch_size)
else:
return super().get_train_dataloader()
def get_eval_dataloader(self, eval_dataset=None):
if not use_torchacc():
return super().get_eval_dataloader(eval_dataset)
else:
if trainer.is_datasets_available():
import datasets
if eval_dataset is None and self.eval_dataset is None:
raise ValueError('Trainer: evaluation requires an eval_dataset.')
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
data_collator = self.data_collator
if trainer.is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
eval_dataset = self._remove_unused_columns(eval_dataset, description='evaluation')
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description='evaluation')
return ta_eval_dataloader(eval_dataset, data_collator, self._get_eval_sampler(eval_dataset), self.args)
def get_test_dataloader(self, test_dataset):
if not use_torchacc():
return super().get_test_dataloader(test_dataset)
else:
if trainer.is_datasets_available():
import datasets
data_collator = self.data_collator
if trainer.is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
test_dataset = self._remove_unused_columns(test_dataset, description='test')
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description='test')
return ta_test_dataloader(test_dataset, data_collator, self._get_eval_sampler(test_dataset), self.args)
# monkey patching
trainer.DEFAULT_PROGRESS_CALLBACK = ProgressCallbackNew
trainer.DEFAULT_CALLBACKS = [DefaultFlowCallbackNew]
trainer.PrinterCallback = PrinterCallbackNew
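# --- A standalone sketch (not part of the original module) of the loss-scale weighting used in
# `Seq2SeqTrainer.compute_scaled_loss`: the per-token cross entropy is multiplied by a per-token
# scale before averaging. The shapes, labels and scales below are illustrative.
def _demo_scaled_loss() -> torch.Tensor:
    vocab_size, seq_len = 8, 5
    logits = torch.randn(1, seq_len, vocab_size)
    labels = torch.tensor([[-100, 3, 5, 2, 7]])        # -100 marks ignored (prompt) positions
    loss_scale = torch.tensor([[0., 1., 1., 2., 2.]])  # e.g. up-weight Action/Action Input spans
    return Seq2SeqTrainer.compute_scaled_loss(labels, logits, loss_scale)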
| swift/swift/trainers/trainers.py/0 | {
"file_path": "swift/swift/trainers/trainers.py",
"repo_id": "swift",
"token_count": 6316
} | 221 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from dataclasses import dataclass, field
import torch
from torch import nn
from swift.utils.logger import get_logger
from .utils import SwiftAdapter, SwiftConfig, SwiftOutput
logger = get_logger()
@dataclass
class NEFTuneConfig(SwiftConfig):
"""
The configuration class for the NEFTune module.
    NEFTune adds slight noise to embedding outputs.
See https://arxiv.org/abs/2310.05914
Args:
noise_alpha(`float`): The noise alpha value used for the NEFTune, default 5.0
"""
noise_alpha: float = field(default=5.0, metadata={'help': 'The noise alpha value used for the NEFTune'})
def __post_init__(self):
from .mapping import SwiftTuners
self.swift_type = SwiftTuners.NEFTUNE
class NEFTune(SwiftAdapter):
@staticmethod
def prepare_model(model: nn.Module, config: NEFTuneConfig, adapter_name: str) -> SwiftOutput:
"""Prepare a model with `NEFTuneConfig`"""
for sub_module in model.modules():
if isinstance(sub_module, torch.nn.Embedding):
def neftune_hook(module, args, output):
if module.training and getattr(module, 'nef_activated'):
dims = torch.tensor(output.size(-1) * output.size(-2))
mag_norm = config.noise_alpha / torch.sqrt(dims)
output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm)
return output
if hasattr(sub_module, 'nef_activated'):
raise ValueError('NEFTune does not support a second tuner.')
sub_module.register_forward_hook(neftune_hook)
sub_module.nef_activated = True
def state_dict_callback(state_dict, adapter_name):
return state_dict
def mark_trainable_callback(model):
return
return SwiftOutput(config, state_dict_callback, mark_trainable_callback)
@staticmethod
def activate_adapter(module: torch.nn.Module, adapter_name: str, activate: bool, offload: str = None):
for sub_module in module.modules():
if isinstance(sub_module, torch.nn.Embedding):
sub_module.nef_activated = activate
@staticmethod
def freeze_model():
return False
@staticmethod
def has_additional_modules():
return False
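# --- A standalone sketch (not part of the original module) of the noise computation performed by
# the forward hook above: uniform noise with magnitude noise_alpha / sqrt(seq_len * dim) is added
# to the embedding output during training. The tensor shapes below are illustrative.
def _demo_neftune_noise(noise_alpha: float = 5.0) -> torch.Tensor:
    embedding_output = torch.randn(2, 16, 32)  # (batch, seq_len, dim)
    dims = torch.tensor(embedding_output.size(-1) * embedding_output.size(-2))
    mag_norm = noise_alpha / torch.sqrt(dims)
    return embedding_output + torch.zeros_like(embedding_output).uniform_(-mag_norm, mag_norm)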
| swift/swift/tuners/neftune.py/0 | {
"file_path": "swift/swift/tuners/neftune.py",
"repo_id": "swift",
"token_count": 1007
} | 222 |
# Copyright (c) Alibaba, Inc. and its affiliates.
# Part of the implementation is borrowed from kmeng01/rome.
from typing import Dict, List
import torch
from modelscope import AutoTokenizer
from swift.utils.logger import get_logger
from .repr_tools import get_reprs_at_idxs, get_reprs_at_word_tokens
from .rome_hparams import ROMEHyperParams
logger = get_logger()
def compute_u(
model: torch.nn.Module,
tokenizer: AutoTokenizer,
request: Dict,
hparams: ROMEHyperParams,
layer: int,
context_templates: List[str],
batch_first=True,
) -> torch.Tensor:
"""
Computes the left vector used in constructing the rank-1 update matrix.
"""
logger.info('Computing left vector (u)...')
# Compute projection token
word_repr_args = dict(
model=model,
tokenizer=tokenizer,
layer=layer,
module_template=hparams.rewrite_module_tmp,
track='in',
batch_first=batch_first,
)
    if hparams.fact_token.startswith('subject_'):
word = request['subject']
logger.info(f'Selected u projection object {word}')
cur_repr = get_reprs_at_word_tokens(
context_templates=[templ.format(request['prompt']) for templ in context_templates],
words=[word for _ in range(len(context_templates))],
subtoken=hparams.fact_token[len('subject_'):],
**word_repr_args,
).mean(0)
elif hparams.fact_token == 'last':
# Heuristic to choose last word. Not a huge deal if there's a minor
# edge case (e.g. multi-token word) because the function below will
# take the last token.
cur_repr = get_reprs_at_idxs(
contexts=[templ.format(request['prompt'].format(request['subject'])) for templ in context_templates],
idxs=[[-1] for _ in range(len(context_templates))],
**word_repr_args,
).mean(0)
logger.info('Selected u projection token with last token')
else:
raise ValueError(f'fact_token={hparams.fact_token} not recognized')
    # NOTE: unlike the original ROME implementation, no inverse second-moment (covariance)
    # adjustment is applied here; the selected representation is simply normalized.
u = cur_repr
return u / u.norm()
| swift/swift/tuners/rome/compute_u.py/0 | {
"file_path": "swift/swift/tuners/rome/compute_u.py",
"repo_id": "swift",
"token_count": 889
} | 223 |
import os
from typing import Type
import gradio as gr
from swift.ui.base import BaseUI
class Generate(BaseUI):
group = 'llm_infer'
locale_dict = {
'max_new_tokens': {
'label': {
'zh': '生成序列最大长度',
'en': 'Max new tokens'
},
},
'do_sample': {
'label': {
'zh': 'do_sample',
'en': 'do_sample'
},
},
'temperature': {
'label': {
'zh': 'temperature',
'en': 'temperature'
},
},
'top_k': {
'label': {
'zh': 'top_k',
'en': 'top_k'
},
},
'top_p': {
'label': {
'zh': 'top_p',
'en': 'top_p'
},
},
'infer_backend': {
'label': {
'zh': '推理框架',
'en': 'Infer backend'
},
},
'repetition_penalty': {
'label': {
'zh': 'repetition_penalty',
'en': 'repetition_penalty'
},
},
'port': {
'label': {
'zh': '端口',
'en': 'port'
},
},
}
@classmethod
def do_build_ui(cls, base_tab: Type['BaseUI']):
with gr.Row():
gr.Textbox(elem_id='max_new_tokens', lines=1, value='2048')
gr.Checkbox(elem_id='do_sample', value=True)
gr.Dropdown(elem_id='infer_backend', value='pt')
gr.Slider(elem_id='temperature', minimum=0.0, maximum=10, step=0.1, value=0.3)
gr.Slider(elem_id='top_k', minimum=1, maximum=100, step=5, value=20)
gr.Slider(elem_id='top_p', minimum=0.0, maximum=1.0, step=0.05, value=0.7)
gr.Slider(elem_id='repetition_penalty', minimum=0.0, maximum=10, step=0.05, value=1.05)
if os.environ.get('MODELSCOPE_ENVIRONMENT') != 'studio':
gr.Textbox(elem_id='port', lines=1, value='8000')
| swift/swift/ui/llm_infer/generate.py/0 | {
"file_path": "swift/swift/ui/llm_infer/generate.py",
"repo_id": "swift",
"token_count": 1298
} | 224 |
import collections
import os.path
import sys
import time
import webbrowser
from datetime import datetime
from typing import Dict, List, Tuple, Type
import gradio as gr
import json
import matplotlib.pyplot as plt
import psutil
from gradio import Accordion, Tab
from transformers import is_tensorboard_available
from swift.ui.base import BaseUI
from swift.ui.llm_train.utils import close_loop, run_command_in_subprocess
from swift.utils import TB_COLOR, TB_COLOR_SMOOTH, get_logger, read_tensorboard_file, tensorboard_smoothing
logger = get_logger()
class Runtime(BaseUI):
handlers: Dict[str, Tuple[List, Tuple]] = {}
group = 'llm_train'
all_plots = None
log_event = None
is_studio = os.environ.get('MODELSCOPE_ENVIRONMENT') == 'studio'
sft_plot = [
{
'name': 'train/loss',
'smooth': 0.9,
},
{
'name': 'train/acc',
'smooth': None,
},
{
'name': 'train/learning_rate',
'smooth': None,
},
{
'name': 'eval/loss',
'smooth': 0.9,
},
{
'name': 'eval/acc',
'smooth': None,
},
]
dpo_plot = [
{
'name': 'train/loss',
'smooth': 0.9,
},
{
'name': 'train/rewards/accuracies',
'smooth': None,
},
{
'name': 'train/rewards/margins',
'smooth': 0.9,
},
{
'name': 'train/logps/chosen',
'smooth': 0.9,
},
{
'name': 'train/logps/rejected',
'smooth': 0.9,
},
]
kto_plot = [
{
'name': 'kl',
'smooth': None,
},
{
'name': 'rewards/chosen_sum',
'smooth': 0.9,
},
{
'name': 'logps/chosen_sum',
'smooth': 0.9,
},
{
'name': 'rewards/rejected_sum',
'smooth': 0.9,
},
{
'name': 'logps/rejected_sum',
'smooth': 0.9,
},
]
orpo_plot = [
{
'name': 'train/loss',
'smooth': 0.9,
},
{
'name': 'train/rewards/accuracies',
'smooth': None,
},
{
'name': 'train/rewards/margins',
'smooth': 0.9,
},
{
'name': 'train/rewards/chosen',
'smooth': 0.9,
},
{
'name': 'train/log_odds_ratio',
'smooth': 0.9,
},
]
locale_dict = {
'runtime_tab': {
'label': {
'zh': '运行时',
'en': 'Runtime'
},
},
'tb_not_found': {
'value': {
'zh': 'tensorboard未安装,使用pip install tensorboard进行安装',
'en': 'tensorboard not found, install it by pip install tensorboard',
}
},
'running_cmd': {
'label': {
'zh': '运行命令',
'en': 'Command line'
},
'info': {
'zh': '执行的实际命令',
'en': 'The actual command'
}
},
'show_log': {
'value': {
'zh': '展示运行状态',
'en': 'Show running status'
},
},
'stop_show_log': {
'value': {
'zh': '停止展示运行状态',
'en': 'Stop showing running status'
},
},
'logging_dir': {
'label': {
'zh': '日志路径',
'en': 'Logging dir'
},
'info': {
'zh': '支持手动传入文件路径',
                'en': 'A custom file path can also be entered manually'
}
},
'log': {
'label': {
'zh': '日志输出',
'en': 'Logging content'
},
'info': {
'zh': '如果日志无更新请再次点击"展示日志内容"',
'en': 'Please press "Show log" if the log content is not updating'
}
},
'running_tasks': {
'label': {
'zh': '运行中任务',
'en': 'Running Tasks'
},
'info': {
'zh': '运行中的任务(所有的swift sft命令)',
'en': 'All running tasks(started by swift sft)'
}
},
'refresh_tasks': {
'value': {
'zh': '找回运行时任务',
'en': 'Find running tasks'
},
},
'kill_task': {
'value': {
'zh': '杀死任务',
'en': 'Kill running task'
},
},
'tb_url': {
'label': {
'zh': 'Tensorboard链接',
'en': 'Tensorboard URL'
},
'info': {
'zh': '仅展示,不可编辑',
'en': 'Not editable'
}
},
'start_tb': {
'value': {
'zh': '打开TensorBoard',
'en': 'Start TensorBoard'
},
},
'close_tb': {
'value': {
'zh': '关闭TensorBoard',
'en': 'Close TensorBoard'
},
},
}
@classmethod
def do_build_ui(cls, base_tab: Type['BaseUI']):
with gr.Accordion(elem_id='runtime_tab', open=False, visible=True):
with gr.Blocks():
with gr.Row():
gr.Textbox(elem_id='running_cmd', lines=1, scale=20, interactive=False, max_lines=1)
if not cls.is_studio:
gr.Textbox(elem_id='logging_dir', lines=1, scale=20, max_lines=1)
gr.Button(elem_id='show_log', scale=2, variant='primary')
gr.Button(elem_id='stop_show_log', scale=2)
gr.Textbox(elem_id='tb_url', lines=1, scale=10, interactive=False, max_lines=1)
gr.Button(elem_id='start_tb', scale=2, variant='primary')
gr.Button(elem_id='close_tb', scale=2)
with gr.Row():
gr.Textbox(elem_id='log', lines=6, visible=False)
if not cls.is_studio:
with gr.Row():
gr.Dropdown(elem_id='running_tasks', scale=10)
gr.Button(elem_id='refresh_tasks', scale=1)
gr.Button(elem_id='kill_task', scale=1)
with gr.Row():
cls.all_plots = []
for idx, k in enumerate(Runtime.sft_plot):
name = k['name']
cls.all_plots.append(gr.Plot(elem_id=str(idx), label=name))
if not cls.is_studio:
cls.log_event = base_tab.element('show_log').click(
Runtime.update_log,
[base_tab.element('running_tasks')], [cls.element('log')] + cls.all_plots).then(
Runtime.wait, [base_tab.element('logging_dir'),
base_tab.element('running_tasks')], [cls.element('log')] + cls.all_plots)
base_tab.element('stop_show_log').click(lambda: None, cancels=cls.log_event)
base_tab.element('start_tb').click(
Runtime.start_tb,
[base_tab.element('logging_dir')],
[base_tab.element('tb_url')],
)
base_tab.element('close_tb').click(
Runtime.close_tb,
[base_tab.element('logging_dir')],
[],
)
base_tab.element('refresh_tasks').click(
Runtime.refresh_tasks,
[base_tab.element('running_tasks')],
[base_tab.element('running_tasks')],
)
@classmethod
def get_plot(cls, task):
if not task or 'swift sft' in task:
return cls.sft_plot
args: dict = cls.parse_info_from_cmdline(task)[1]
train_type = args.get('rlhf_type', 'dpo')
if train_type in ('dpo', 'cpo', 'simpo'):
return cls.dpo_plot
elif train_type == 'kto':
return cls.kto_plot
elif train_type == 'orpo':
return cls.orpo_plot
@classmethod
def update_log(cls, task):
ret = [gr.update(visible=True)]
plot = Runtime.get_plot(task)
for i in range(len(plot)):
p = plot[i]
ret.append(gr.update(visible=True, label=p['name']))
return ret
@classmethod
def wait(cls, logging_dir, task):
if not logging_dir:
return [None] + Runtime.plot(task)
log_file = os.path.join(logging_dir, 'run.log')
offset = 0
latest_data = ''
lines = collections.deque(maxlen=int(os.environ.get('MAX_LOG_LINES', 50)))
try:
            with open(log_file, 'r') as f:
                f.seek(offset)
                fail_cnt = 0
                while True:
                    try:
                        latest_data += f.read()
except UnicodeDecodeError:
continue
if not latest_data:
time.sleep(0.5)
fail_cnt += 1
if fail_cnt > 50:
break
if '\n' not in latest_data:
continue
latest_lines = latest_data.split('\n')
if latest_data[-1] != '\n':
latest_data = latest_lines[-1]
latest_lines = latest_lines[:-1]
else:
latest_data = ''
lines.extend(latest_lines)
yield ['\n'.join(lines)] + Runtime.plot(task)
except IOError:
pass
@classmethod
def show_log(cls, logging_dir):
webbrowser.open('file://' + os.path.join(logging_dir, 'run.log'), new=2)
@classmethod
def start_tb(cls, logging_dir):
if not is_tensorboard_available():
gr.Error(cls.locale('tb_not_found', cls.lang)['value'])
return ''
logging_dir = logging_dir.strip()
logging_dir = logging_dir if not logging_dir.endswith(os.sep) else logging_dir[:-1]
if logging_dir in cls.handlers:
return cls.handlers[logging_dir][1]
handler, lines = run_command_in_subprocess('tensorboard', '--logdir', logging_dir, timeout=2)
localhost_addr = ''
for line in lines:
if 'http://localhost:' in line:
line = line[line.index('http://localhost:'):]
localhost_addr = line[:line.index(' ')]
cls.handlers[logging_dir] = (handler, localhost_addr)
logger.info('===========Tensorboard Log============')
logger.info('\n'.join(lines))
webbrowser.open(localhost_addr, new=2)
return localhost_addr
@staticmethod
def close_tb(logging_dir):
if logging_dir in Runtime.handlers:
close_loop(Runtime.handlers[logging_dir][0])
Runtime.handlers.pop(logging_dir)
@staticmethod
def refresh_tasks(running_task=None):
output_dir = running_task if not running_task or 'pid:' not in running_task else None
process_name = 'swift'
negative_name = 'swift.exe'
cmd_name = ['sft', 'rlhf']
process = []
selected = None
for proc in psutil.process_iter():
try:
cmdlines = proc.cmdline()
except (psutil.ZombieProcess, psutil.AccessDenied, psutil.NoSuchProcess):
cmdlines = []
if any([process_name in cmdline
for cmdline in cmdlines]) and not any([negative_name in cmdline
for cmdline in cmdlines]) and any( # noqa
[cmdline in cmd_name for cmdline in cmdlines]): # noqa
process.append(Runtime.construct_running_task(proc))
if output_dir is not None and any( # noqa
[output_dir == cmdline for cmdline in cmdlines]): # noqa
selected = Runtime.construct_running_task(proc)
if not selected:
if running_task and running_task in process:
selected = running_task
if not selected and process:
selected = process[0]
return gr.update(choices=process, value=selected)
@staticmethod
def construct_running_task(proc):
pid = proc.pid
ts = time.time()
create_time = proc.create_time()
create_time_formatted = datetime.fromtimestamp(create_time).strftime('%Y-%m-%d, %H:%M')
def format_time(seconds):
days = int(seconds // (24 * 3600))
hours = int((seconds % (24 * 3600)) // 3600)
minutes = int((seconds % 3600) // 60)
seconds = int(seconds % 60)
if days > 0:
time_str = f'{days}d {hours}h {minutes}m {seconds}s'
elif hours > 0:
time_str = f'{hours}h {minutes}m {seconds}s'
elif minutes > 0:
time_str = f'{minutes}m {seconds}s'
else:
time_str = f'{seconds}s'
return time_str
return f'pid:{pid}/create:{create_time_formatted}' \
f'/running:{format_time(ts-create_time)}/cmd:{" ".join(proc.cmdline())}'
@staticmethod
def parse_info_from_cmdline(task):
pid = None
if '/cmd:' in task:
for i in range(3):
slash = task.find('/')
if i == 0:
pid = task[:slash].split(':')[1]
task = task[slash + 1:]
if 'swift sft' in task:
args = task.split('swift sft')[1]
elif 'swift rlhf' in task:
args = task.split('swift rlhf')[1]
args = [arg.strip() for arg in args.split('--') if arg.strip()]
all_args = {}
for i in range(len(args)):
space = args[i].find(' ')
splits = args[i][:space], args[i][space + 1:]
all_args[splits[0]] = splits[1]
output_dir = all_args['output_dir']
if os.path.exists(os.path.join(output_dir, 'sft_args.json')):
with open(os.path.join(output_dir, 'sft_args.json'), 'r') as f:
_json = json.load(f)
for key in all_args.keys():
all_args[key] = _json.get(key)
if isinstance(all_args[key], list):
if any([' ' in value for value in all_args[key]]):
all_args[key] = [f'"{value}"' for value in all_args[key]]
all_args[key] = ' '.join(all_args[key])
return pid, all_args
@staticmethod
def kill_task(task):
pid, all_args = Runtime.parse_info_from_cmdline(task)
output_dir = all_args['output_dir']
if sys.platform == 'win32':
os.system(f'taskkill /f /t /pid "{pid}"')
else:
os.system(f'pkill -9 -f {output_dir}')
time.sleep(1)
return [Runtime.refresh_tasks()] + [gr.update(value=None)] * (len(Runtime.get_plot(task)) + 1)
@staticmethod
def reset():
return None, 'output'
@staticmethod
def task_changed(task, base_tab):
if task:
_, all_args = Runtime.parse_info_from_cmdline(task)
else:
all_args = {}
elements = [value for value in base_tab.elements().values() if not isinstance(value, (Tab, Accordion))]
ret = []
for e in elements:
if e.elem_id in all_args:
if isinstance(e, gr.Dropdown) and e.multiselect:
arg = all_args[e.elem_id].split(' ')
else:
arg = all_args[e.elem_id]
ret.append(gr.update(value=arg))
else:
ret.append(gr.update())
return ret + [gr.update(value=None)] * (len(Runtime.get_plot(task)) + 1)
@staticmethod
def plot(task):
plot = Runtime.get_plot(task)
if not task:
return [None] * len(plot)
_, all_args = Runtime.parse_info_from_cmdline(task)
tb_dir = all_args['logging_dir']
if not os.path.exists(tb_dir):
return [None] * len(plot)
fname = [
fname for fname in os.listdir(tb_dir)
if os.path.isfile(os.path.join(tb_dir, fname)) and fname.startswith('events.out')
]
if fname:
fname = fname[0]
else:
return [None] * len(plot)
tb_path = os.path.join(tb_dir, fname)
data = read_tensorboard_file(tb_path)
plots = []
for k in plot:
name = k['name']
smooth = k['smooth']
if name not in data:
plots.append(None)
continue
_data = data[name]
steps = [d['step'] for d in _data]
values = [d['value'] for d in _data]
if len(values) == 0:
continue
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot()
# _, ax = plt.subplots(1, 1, squeeze=True, figsize=(8, 5), dpi=100)
ax.set_title(name)
if len(values) == 1:
ax.scatter(steps, values, color=TB_COLOR_SMOOTH)
elif smooth is not None:
ax.plot(steps, values, color=TB_COLOR)
values_s = tensorboard_smoothing(values, smooth)
ax.plot(steps, values_s, color=TB_COLOR_SMOOTH)
else:
ax.plot(steps, values, color=TB_COLOR_SMOOTH)
plots.append(fig)
return plots
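# --- A small usage sketch (not part of the original module): the task strings produced by
# `construct_running_task` can be parsed back into a pid and an argument dict. The pid,
# timestamps and paths below are purely illustrative.
# >>> task = 'pid:1234/create:2024-06-27, 07:03/running:5m 2s/cmd:swift sft --model_type qwen-7b-chat --output_dir output/qwen'
# >>> Runtime.parse_info_from_cmdline(task)
# ('1234', {'model_type': 'qwen-7b-chat', 'output_dir': 'output/qwen'})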
| swift/swift/ui/llm_train/runtime.py/0 | {
"file_path": "swift/swift/ui/llm_train/runtime.py",
"repo_id": "swift",
"token_count": 10483
} | 225 |
{"system": "00000", "query": "11111", "response": "22222"}
{"query": "aaaaa", "response": "bbbbb"}
{"query": "AAAAA", "response": "BBBBB"}
| swift/tests/llm/data/swift_single.jsonl/0 | {
"file_path": "swift/tests/llm/data/swift_single.jsonl",
"repo_id": "swift",
"token_count": 53
} | 226 |
import os
import shutil
import tempfile
import unittest
import torch
from modelscope import AutoTokenizer, Model
from swift import Swift
from swift.tuners.rome import RomeConfig
class TestRome(unittest.TestCase):
def setUp(self):
        print('Testing %s.%s' % (type(self).__name__, self._testMethodName))
self.tmp_dir = tempfile.TemporaryDirectory().name
if not os.path.exists(self.tmp_dir):
os.makedirs(self.tmp_dir)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
super().tearDown()
    @unittest.skip('Rome test is skipped because the test image does not have flash-attn2')
def test_rome(self):
model = Model.from_pretrained('modelscope/Llama-2-7b-ms', device_map='auto', trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained('modelscope/Llama-2-7b-ms', trust_remote_code=True)
request = [{
'prompt': '{} was the founder of',
'subject': 'Steve Jobs',
'target': 'Microsoft',
}]
config = RomeConfig(
model_type='llama-7b',
knowledge=request,
tokenizer=tokenizer,
)
model = Swift.prepare_model(model, config)
prompt = 'Steve Jobs was the founder of'
inp_tok = tokenizer(prompt, return_token_type_ids=False, return_tensors='pt')
for key, value in inp_tok.items():
inp_tok[key] = value.to('cuda')
with torch.no_grad():
generated_ids = model.generate(**inp_tok, temperature=0.1, top_k=50, max_length=128, do_sample=True)
responses = tokenizer.batch_decode(
generated_ids[:, inp_tok['input_ids'].size(1):],
skip_special_tokens=True,
clean_up_tokenization_spaces=True)
self.assertTrue('Microsoft' in responses[0])
| swift/tests/tuners/test_rome.py/0 | {
"file_path": "swift/tests/tuners/test_rome.py",
"repo_id": "swift",
"token_count": 817
} | 227 |
services:
llamafactory:
build:
dockerfile: ./docker/docker-cuda/Dockerfile
context: ../..
args:
INSTALL_BNB: false
INSTALL_VLLM: false
INSTALL_DEEPSPEED: false
PIP_INDEX: https://pypi.org/simple
container_name: llamafactory
volumes:
- ../../hf_cache:/root/.cache/huggingface
- ../../ms_cache:/root/.cache/modelscope
- ../../data:/app/data
- ../../output:/app/output
ports:
- "7860:7860"
- "8000:8000"
ipc: host
tty: true
stdin_open: true
command: bash
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: "all"
capabilities: [gpu]
restart: unless-stopped
| LLaMA-Factory/docker/docker-cuda/docker-compose.yml/0 | {
"file_path": "LLaMA-Factory/docker/docker-cuda/docker-compose.yml",
"repo_id": "LLaMA-Factory",
"token_count": 378
} | 0 |
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
infer_backend: vllm
vllm_enforce_eager: true
| LLaMA-Factory/examples/inference/llama3_vllm.yaml/0 | {
"file_path": "LLaMA-Factory/examples/inference/llama3_vllm.yaml",
"repo_id": "LLaMA-Factory",
"token_count": 54
} | 1 |
### model
model_name_or_path: llava-hf/llava-1.5-7b-hf
visual_inputs: true
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: mllm_demo
template: vicuna
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llava1_5-7b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
ddp_timeout: 180000000
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
| LLaMA-Factory/examples/train_lora/llava1_5_lora_sft.yaml/0 | {
"file_path": "LLaMA-Factory/examples/train_lora/llava1_5_lora_sft.yaml",
"repo_id": "LLaMA-Factory",
"token_count": 291
} | 2 |
top.booster: auto
top.checkpoint_path: []
top.finetuning_type: lora
top.model_name: Qwen2-0.5B
top.quantization_bit: none
top.quantization_method: bitsandbytes
top.rope_scaling: none
top.template: default
top.visual_inputs: false
train.additional_target: ''
train.badam_mode: layer
train.badam_switch_interval: 50
train.badam_switch_mode: ascending
train.badam_update_ratio: 0.05
train.batch_size: 2
train.compute_type: fp16
train.create_new_adapter: false
train.cutoff_len: 1024
train.dataset:
- alpaca_gpt4_zh
train.dataset_dir: data
train.ds_offload: false
train.ds_stage: none
train.freeze_extra_modules: ''
train.freeze_trainable_layers: 2
train.freeze_trainable_modules: all
train.galore_rank: 16
train.galore_scale: 0.25
train.galore_target: all
train.galore_update_interval: 200
train.gradient_accumulation_steps: 8
train.learning_rate: 5e-5
train.logging_steps: 5
train.lora_alpha: 16
train.lora_dropout: 0
train.lora_rank: 8
train.lora_target: ''
train.loraplus_lr_ratio: 0
train.lr_scheduler_type: cosine
train.max_grad_norm: '1.0'
train.max_samples: '100000'
train.neftune_alpha: 0
train.num_train_epochs: '3.0'
train.optim: adamw_torch
train.packing: false
train.ppo_score_norm: false
train.ppo_whiten_rewards: false
train.pref_beta: 0.1
train.pref_ftx: 0
train.pref_loss: sigmoid
train.report_to: false
train.resize_vocab: false
train.reward_model: null
train.save_steps: 100
train.shift_attn: false
train.training_stage: Supervised Fine-Tuning
train.upcast_layernorm: false
train.use_badam: false
train.use_dora: false
train.use_galore: false
train.use_llama_pro: false
train.use_pissa: false
train.use_rslora: false
train.val_size: 0
train.warmup_steps: 0
| LLaMA-Factory/saves/Qwen2-0.5B/lora/train_2024-06-27-07-03-48/llamaboard_config.yaml/0 | {
"file_path": "LLaMA-Factory/saves/Qwen2-0.5B/lora/train_2024-06-27-07-03-48/llamaboard_config.yaml",
"repo_id": "LLaMA-Factory",
"token_count": 663
} | 3 |
# coding=utf-8
# Copyright 2024 Microsoft Corporation and the LlamaFactory team.
#
# This code is inspired by the Microsoft's DeepSpeed library.
# https://www.deepspeed.ai/tutorials/flops-profiler/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fire
import torch
from deepspeed.accelerator import get_accelerator # type: ignore
from deepspeed.profiling.flops_profiler import get_model_profile # type: ignore
from llamafactory.chat import ChatModel
def calculate_flops(
model_name_or_path: str,
batch_size: int = 1,
seq_length: int = 256,
flash_attn: str = "auto",
):
r"""
Calculates the flops of pre-trained models.
Usage: python cal_flops.py --model_name_or_path path_to_model --batch_size 1 --seq_length 512
"""
with get_accelerator().device(0):
chat_model = ChatModel(dict(model_name_or_path=model_name_or_path, template="empty", flash_attn=flash_attn))
fake_input = torch.ones((batch_size, seq_length), dtype=torch.long, device=chat_model.model.device)
input_dict = {"input_ids": fake_input, "labels": fake_input.clone()}
flops, macs, params = get_model_profile(chat_model.model, kwargs=input_dict, print_profile=True, detailed=True)
print("FLOPs:", flops)
print("MACs:", macs)
print("Params:", params)
if __name__ == "__main__":
fire.Fire(calculate_flops)
| LLaMA-Factory/scripts/cal_flops.py/0 | {
"file_path": "LLaMA-Factory/scripts/cal_flops.py",
"repo_id": "LLaMA-Factory",
"token_count": 645
} | 4 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, List, Sequence, Tuple
from ..data import Role
from ..extras.constants import CHOICES
@dataclass
class EvalTemplate:
system: str
choice: str
answer: str
def _parse_example(self, example: Dict[str, str]) -> Tuple[str, str]:
r"""
input: a dict with keys {"question", "A", "B", "C", "D", "answer"}
output: a tuple of (prompt, response)
"""
candidates = [self.choice.format(choice=ch, content=example[ch]) for ch in CHOICES if ch in example]
return "".join([example["question"]] + candidates + [self.answer]), example["answer"]
def format_example(
self, target_data: Dict[str, str], support_set: Sequence[Dict[str, str]], subject_name: str
) -> List[Dict[str, str]]:
r"""
Converts dataset examples to messages.
"""
messages = []
for k in range(len(support_set)):
prompt, response = self._parse_example(support_set[k])
messages.append({"role": Role.USER.value, "content": prompt})
messages.append({"role": Role.ASSISTANT.value, "content": response})
prompt, response = self._parse_example(target_data)
messages.append({"role": Role.USER.value, "content": prompt})
messages.append({"role": Role.ASSISTANT.value, "content": response})
messages[0]["content"] = self.system.format(subject=subject_name) + messages[0]["content"]
return messages
eval_templates: Dict[str, "EvalTemplate"] = {}
def _register_eval_template(name: str, system: str, choice: str, answer: str) -> None:
eval_templates[name] = EvalTemplate(system=system, choice=choice, answer=answer)
def get_eval_template(name: str) -> "EvalTemplate":
eval_template = eval_templates.get(name, None)
assert eval_template is not None, "Template {} does not exist.".format(name)
return eval_template
_register_eval_template(
name="en",
system="The following are multiple choice questions (with answers) about {subject}.\n\n",
choice="\n{choice}. {content}",
answer="\nAnswer:",
)
_register_eval_template(
name="zh",
system="以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n",
choice="\n{choice}. {content}",
answer="\n答案:",
)
| LLaMA-Factory/src/llamafactory/eval/template.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/eval/template.py",
"repo_id": "LLaMA-Factory",
"token_count": 1067
} | 5 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import os
from typing import Any, Dict, List
from transformers.trainer import TRAINER_STATE_NAME
from .logging import get_logger
from .packages import is_matplotlib_available
if is_matplotlib_available():
import matplotlib.figure
import matplotlib.pyplot as plt
logger = get_logger(__name__)
def smooth(scalars: List[float]) -> List[float]:
r"""
EMA implementation according to TensorBoard.
"""
if len(scalars) == 0:
return []
last = scalars[0]
smoothed = []
weight = 1.8 * (1 / (1 + math.exp(-0.05 * len(scalars))) - 0.5) # a sigmoid function
for next_val in scalars:
smoothed_val = last * weight + (1 - weight) * next_val
smoothed.append(smoothed_val)
last = smoothed_val
return smoothed
def gen_loss_plot(trainer_log: List[Dict[str, Any]]) -> "matplotlib.figure.Figure":
r"""
Plots loss curves in LlamaBoard.
"""
plt.close("all")
plt.switch_backend("agg")
fig = plt.figure()
ax = fig.add_subplot(111)
steps, losses = [], []
for log in trainer_log:
if log.get("loss", None):
steps.append(log["current_steps"])
losses.append(log["loss"])
ax.plot(steps, losses, color="#1f77b4", alpha=0.4, label="original")
ax.plot(steps, smooth(losses), color="#1f77b4", label="smoothed")
ax.legend()
ax.set_xlabel("step")
ax.set_ylabel("loss")
return fig
def plot_loss(save_dictionary: os.PathLike, keys: List[str] = ["loss"]) -> None:
r"""
Plots loss curves and saves the image.
"""
plt.switch_backend("agg")
with open(os.path.join(save_dictionary, TRAINER_STATE_NAME), "r", encoding="utf-8") as f:
data = json.load(f)
for key in keys:
steps, metrics = [], []
for i in range(len(data["log_history"])):
if key in data["log_history"][i]:
steps.append(data["log_history"][i]["step"])
metrics.append(data["log_history"][i][key])
if len(metrics) == 0:
logger.warning(f"No metric {key} to plot.")
continue
plt.figure()
plt.plot(steps, metrics, color="#1f77b4", alpha=0.4, label="original")
plt.plot(steps, smooth(metrics), color="#1f77b4", label="smoothed")
plt.title("training {} of {}".format(key, save_dictionary))
plt.xlabel("step")
plt.ylabel(key)
plt.legend()
figure_path = os.path.join(save_dictionary, "training_{}.png".format(key.replace("/", "_")))
plt.savefig(figure_path, format="png", dpi=100)
print("Figure saved at:", figure_path)
| LLaMA-Factory/src/llamafactory/extras/ploting.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/extras/ploting.py",
"repo_id": "LLaMA-Factory",
"token_count": 1312
} | 6 |
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's Transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llava/modeling_llava.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Tuple
import torch
import transformers.models
from transformers.activations import ACT2FN
from ...extras.logging import get_logger
if TYPE_CHECKING:
from transformers import LlavaConfig, PretrainedConfig, PreTrainedModel
from ...hparams import ModelArguments
logger = get_logger(__name__)
class LlavaMultiModalProjectorForYiVL(torch.nn.Module):
def __init__(self, config: "LlavaConfig") -> None:
super().__init__()
self.config = config
if config is None:
return
self.linear_1 = torch.nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True)
self.linear_2 = torch.nn.LayerNorm(config.text_config.hidden_size, bias=True)
self.linear_3 = torch.nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
self.linear_4 = torch.nn.LayerNorm(config.text_config.hidden_size, bias=True)
self.act = ACT2FN[config.projector_hidden_act]
def forward(self, image_features: "torch.Tensor") -> "torch.Tensor":
hidden_states = self.linear_1(image_features)
hidden_states = self.linear_2(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_3(hidden_states)
hidden_states = self.linear_4(hidden_states)
if hidden_states.dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.linear_1.weight.dtype
logger.warning_once("The hidden states seems to be silently casted in float32.")
hidden_states = hidden_states.to(target_dtype)
return hidden_states
class LlavaMultiModalProjectorForYiVLForVLLM(LlavaMultiModalProjectorForYiVL):
def __init__(self, vision_hidden_size: int, text_hidden_size: int, projector_hidden_act: str) -> None:
super().__init__(config=None)
self.linear_1 = torch.nn.Linear(vision_hidden_size, text_hidden_size, bias=True)
self.linear_2 = torch.nn.LayerNorm(text_hidden_size, bias=True)
self.linear_3 = torch.nn.Linear(text_hidden_size, text_hidden_size, bias=True)
self.linear_4 = torch.nn.LayerNorm(text_hidden_size, bias=True)
self.act = ACT2FN[projector_hidden_act]
def autocast_projector_dtype(
model: "PreTrainedModel", model_args: "ModelArguments", mm_projector_name: str = "multi_modal_projector"
) -> None:
def _mm_projector_forward_post_hook(
module: "torch.nn.Module", args: Tuple["torch.Tensor"], output: "torch.Tensor"
) -> "torch.Tensor":
return output.to(model_args.compute_dtype)
if hasattr(model, mm_projector_name) and getattr(model, "quantization_method", None):
logger.info("Casting multimodal projector outputs in {}.".format(model_args.compute_dtype))
mm_projector: "torch.nn.Module" = getattr(model, mm_projector_name)
mm_projector.register_forward_hook(_mm_projector_forward_post_hook)
def configure_visual_model(config: "PretrainedConfig") -> None:
if getattr(config, "model_type", None) == "llava": # required for ds zero3 and valuehead models
setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))
if getattr(config, "is_yi_vl_derived_model", None):
logger.info("Detected Yi-VL model, applying projector patch.")
transformers.models.llava.modeling_llava.LlavaMultiModalProjector = LlavaMultiModalProjectorForYiVL
| LLaMA-Factory/src/llamafactory/model/model_utils/visual.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/model/model_utils/visual.py",
"repo_id": "LLaMA-Factory",
"token_count": 1694
} | 7 |
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's TRL library.
# https://github.com/huggingface/trl/blob/v0.8.0/trl/trainer/kto_trainer.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import defaultdict
from contextlib import nullcontext
from types import MethodType
from typing import TYPE_CHECKING, Dict, Literal, Optional, Tuple, Union
import torch
from transformers import Trainer
from trl import KTOTrainer
from trl.trainer import disable_dropout_in_model
from ...extras.constants import IGNORE_INDEX
from ..trainer_utils import create_custom_optimzer, create_custom_scheduler, get_batch_logps
if TYPE_CHECKING:
import torch.utils.data
from transformers import PreTrainedModel, ProcessorMixin
from ...hparams import FinetuningArguments
class CustomKTOTrainer(KTOTrainer):
def __init__(
self,
model: Union["PreTrainedModel", torch.nn.Module],
ref_model: Optional[Union["PreTrainedModel", torch.nn.Module]],
finetuning_args: "FinetuningArguments",
processor: Optional["ProcessorMixin"],
disable_dropout: bool = True,
**kwargs,
):
if disable_dropout:
disable_dropout_in_model(model)
if ref_model is not None:
disable_dropout_in_model(ref_model)
self.finetuning_args = finetuning_args
self.processor = processor
self.reference_free = False
self.use_dpo_data_collator = True # hack to avoid warning
self.generate_during_eval = False # disable at evaluation
self.label_pad_token_id = IGNORE_INDEX
self.padding_value = 0
self.is_encoder_decoder = model.config.is_encoder_decoder
self.precompute_ref_log_probs = False
self._precomputed_train_ref_log_probs = False
self._precomputed_eval_ref_log_probs = False
self._peft_has_been_casted_to_bf16 = False
self.ref_model = ref_model
self._stored_metrics = defaultdict(lambda: defaultdict(list))
# kto hyperparams
self.beta = finetuning_args.pref_beta
self.desirable_weight = finetuning_args.kto_chosen_weight
self.undesirable_weight = finetuning_args.kto_rejected_weight
self.ftx_gamma = finetuning_args.pref_ftx
Trainer.__init__(self, model=model, **kwargs)
if not hasattr(self, "accelerator"):
raise AttributeError("Please update `transformers`.")
warnings.simplefilter("ignore") # remove gc warnings on ref model
if ref_model is not None:
if self.is_deepspeed_enabled:
if not (
getattr(ref_model, "is_loaded_in_8bit", False) or getattr(ref_model, "is_loaded_in_4bit", False)
): # quantized models are already set on the correct device
self.ref_model = self._prepare_deepspeed(self.ref_model)
else:
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
self.ref_model.eval()
if finetuning_args.use_badam:
from badam import BAdamCallback, clip_grad_norm_old_version
self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
self.callback_handler.add_callback(BAdamCallback)
def create_optimizer(self) -> "torch.optim.Optimizer":
if self.optimizer is None:
self.optimizer = create_custom_optimzer(self.model, self.args, self.finetuning_args)
return super().create_optimizer()
def create_scheduler(
self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None
) -> "torch.optim.lr_scheduler.LRScheduler":
create_custom_scheduler(self.args, num_training_steps, optimizer)
return super().create_scheduler(num_training_steps, optimizer)
def _get_train_sampler(self) -> Optional["torch.utils.data.Sampler"]:
r"""
Replaces the sequential sampler of KTO Trainer created by trl with the random sampler.
"""
return Trainer._get_train_sampler(self)
def _save(self, output_dir: Optional[str] = None, state_dict: Optional[Dict[str, "torch.Tensor"]] = None) -> None:
super()._save(output_dir, state_dict)
output_dir = output_dir if output_dir is not None else self.args.output_dir
if self.processor is not None:
getattr(self.processor, "image_processor").save_pretrained(output_dir)
def forward(
self, model: "PreTrainedModel", batch: Dict[str, "torch.Tensor"], prefix: Literal["", "kl_"] = ""
) -> Tuple["torch.Tensor", "torch.Tensor"]:
r"""
Runs forward pass and computes the log probabilities.
"""
batch = {k: v.detach().clone() for k, v in batch.items()} # avoid error
model_inputs = {
"input_ids": batch["{}input_ids".format(prefix)],
"attention_mask": batch["{}attention_mask".format(prefix)],
}
if "pixel_values" in batch:
model_inputs["pixel_values"] = batch["pixel_values"]
if "{}token_type_ids".format(prefix) in batch:
model_inputs["token_type_ids"] = batch["{}token_type_ids".format(prefix)]
logits = model(**model_inputs, return_dict=True, use_cache=False).logits.to(torch.float32)
logps, valid_length = get_batch_logps(logits=logits, labels=batch["{}labels".format(prefix)])
return logps, logps / valid_length
def concatenated_forward(
self, model: "PreTrainedModel", batch: Dict[str, "torch.Tensor"]
) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor"]:
target_logps, target_logps_avg = self.forward(model, batch)
with torch.no_grad():
kl_logps, _ = self.forward(model, batch, prefix="kl_")
if len(target_logps) != len(batch["kto_tags"]):
raise ValueError("Mismatched shape of inputs and labels.")
chosen_logps = target_logps[batch["kto_tags"]]
rejected_logps = target_logps[~batch["kto_tags"]]
chosen_logps_avg = target_logps_avg[batch["kto_tags"]]
return chosen_logps, rejected_logps, kl_logps, chosen_logps_avg
def compute_reference_log_probs(
self, model: "PreTrainedModel", batch: Dict[str, "torch.Tensor"]
) -> Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]:
r"""
Computes log probabilities of the reference model.
"""
if self.ref_model is None:
ref_model = model
ref_context = self.accelerator.unwrap_model(model).disable_adapter()
else:
ref_model = self.ref_model
ref_context = nullcontext()
with torch.no_grad(), ref_context:
reference_chosen_logps, reference_rejected_logps, reference_kl_logps, _ = self.concatenated_forward(
ref_model, batch
)
return reference_chosen_logps, reference_rejected_logps, reference_kl_logps
def get_batch_loss_metrics(
self,
model: "PreTrainedModel",
batch: Dict[str, "torch.Tensor"],
) -> Tuple["torch.Tensor", Dict[str, "torch.Tensor"]]:
r"""
Computes the DPO loss and other metrics for the given batch of inputs for train or test.
"""
metrics = {}
policy_chosen_logps, policy_rejected_logps, policy_kl_logps, policy_chosen_logps_avg = (
self.concatenated_forward(model, batch)
)
reference_chosen_logps, reference_rejected_logps, reference_kl_logps = self.compute_reference_log_probs(
model, batch
)
losses, chosen_rewards, rejected_rewards, kl = self.kto_loss(
policy_chosen_logps,
policy_rejected_logps,
policy_kl_logps,
reference_chosen_logps,
reference_rejected_logps,
reference_kl_logps,
)
losses = losses.nanmean()
if self.ftx_gamma > 1e-6 and len(policy_chosen_logps) > 0: # remember to rescale
sft_loss = -policy_chosen_logps_avg
losses += self.ftx_gamma * sft_loss.nanmean() / len(policy_chosen_logps) * len(batch["labels"])
num_chosen = torch.Tensor([len(chosen_rewards)]).to(self.accelerator.device)
num_rejected = torch.Tensor([len(rejected_rewards)]).to(self.accelerator.device)
all_num_chosen = self.accelerator.gather(num_chosen).sum().item()
all_num_rejected = self.accelerator.gather(num_rejected).sum().item()
if all_num_chosen > 0:
metrics["rewards/chosen_sum"] = self.accelerator.gather(chosen_rewards.nansum()).nansum().item()
metrics["logps/chosen_sum"] = self.accelerator.gather(policy_chosen_logps.nansum()).nansum().item()
metrics["count/chosen"] = all_num_chosen
if all_num_rejected > 0:
metrics["rewards/rejected_sum"] = self.accelerator.gather(rejected_rewards.nansum()).nansum().item()
metrics["logps/rejected_sum"] = self.accelerator.gather(policy_rejected_logps.nansum()).nansum().item()
metrics["count/rejected"] = all_num_rejected
metrics["kl"] = kl.item()
return losses, metrics
| LLaMA-Factory/src/llamafactory/train/kto/trainer.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/train/kto/trainer.py",
"repo_id": "LLaMA-Factory",
"token_count": 4156
} | 8 |
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the original GaLore's implementation: https://github.com/jiaweizzhao/GaLore
# and the original LoRA+'s implementation: https://github.com/nikhil-ghosh-berkeley/loraplus
# and the original BAdam's implementation: https://github.com/Ledzy/BAdam
# and the HuggingFace's TRL library: https://github.com/huggingface/trl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union
import torch
from peft import PeftModel
from transformers import Trainer
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.optimization import get_scheduler
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.trainer_pt_utils import get_parameter_names
from ..extras.constants import IGNORE_INDEX
from ..extras.logging import get_logger
from ..extras.packages import is_galore_available
from ..hparams import FinetuningArguments, ModelArguments
from ..model import find_all_linear_modules, load_model, load_tokenizer, load_valuehead_params
if is_galore_available():
from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit
if TYPE_CHECKING:
from accelerate import Accelerator
from transformers import PreTrainedModel, Seq2SeqTrainingArguments
from trl import AutoModelForCausalLMWithValueHead
from ..hparams import DataArguments
logger = get_logger(__name__)
class DummyOptimizer(torch.optim.Optimizer):
r"""
A dummy optimizer used for the GaLore algorithm.
"""
def __init__(
self, lr: float = 1e-3, optimizer_dict: Optional[Dict["torch.nn.Parameter", "torch.optim.Optimizer"]] = None
) -> None:
dummy_tensor = torch.randn(1, 1)
self.optimizer_dict = optimizer_dict
super().__init__([dummy_tensor], {"lr": lr})
def zero_grad(self, set_to_none: bool = True) -> None:
pass
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
pass
def create_modelcard_and_push(
trainer: "Trainer",
model_args: "ModelArguments",
data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
finetuning_args: "FinetuningArguments",
) -> None:
kwargs = {
"tasks": "text-generation",
"finetuned_from": model_args.model_name_or_path,
"tags": ["llama-factory", finetuning_args.finetuning_type],
}
if data_args.dataset is not None:
kwargs["dataset"] = [dataset.strip() for dataset in data_args.dataset.split(",")]
if model_args.use_unsloth:
kwargs["tags"] = kwargs["tags"] + ["unsloth"]
if not training_args.do_train:
pass
elif training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(license="other", **kwargs) # prevent from connecting to hub
def create_ref_model(
model_args: "ModelArguments", finetuning_args: "FinetuningArguments", add_valuehead: bool = False
) -> Optional[Union["PreTrainedModel", "AutoModelForCausalLMWithValueHead"]]:
r"""
Creates reference model for PPO/DPO training. Evaluation mode is not supported.
The valuehead parameter is randomly initialized since it is useless for PPO training.
"""
if finetuning_args.ref_model is not None:
ref_model_args = ModelArguments.copyfrom(
model_args,
model_name_or_path=finetuning_args.ref_model,
adapter_name_or_path=finetuning_args.ref_model_adapters,
quantization_bit=finetuning_args.ref_model_quantization_bit,
)
ref_finetuning_args = FinetuningArguments()
tokenizer = load_tokenizer(ref_model_args)["tokenizer"]
ref_model = load_model(
tokenizer, ref_model_args, ref_finetuning_args, is_trainable=False, add_valuehead=add_valuehead
)
logger.info("Created reference model from {}".format(finetuning_args.ref_model))
else:
if finetuning_args.finetuning_type == "lora":
ref_model = None
else:
ref_model_args = ModelArguments.copyfrom(model_args)
ref_finetuning_args = FinetuningArguments()
tokenizer = load_tokenizer(ref_model_args)["tokenizer"]
ref_model = load_model(
tokenizer, ref_model_args, ref_finetuning_args, is_trainable=False, add_valuehead=add_valuehead
)
logger.info("Created reference model from the model itself.")
return ref_model
def create_reward_model(
model: "AutoModelForCausalLMWithValueHead", model_args: "ModelArguments", finetuning_args: "FinetuningArguments"
) -> Optional["AutoModelForCausalLMWithValueHead"]:
r"""
Creates reward model for PPO training.
"""
if finetuning_args.reward_model_type == "api":
assert finetuning_args.reward_model.startswith("http"), "Please provide full url."
logger.info("Use reward server {}".format(finetuning_args.reward_model))
return finetuning_args.reward_model
elif finetuning_args.reward_model_type == "lora":
model.pretrained_model.load_adapter(finetuning_args.reward_model, "reward")
for name, param in model.named_parameters(): # https://github.com/huggingface/peft/issues/1090
if "default" in name:
param.data = param.data.to(torch.float32) # trainable params should in fp32
vhead_params = load_valuehead_params(finetuning_args.reward_model, model_args)
assert vhead_params is not None, "Reward model is not correctly loaded."
model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
model.register_buffer(
"default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False
)
model.register_buffer(
"default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False
)
logger.info("Loaded adapter weights of reward model from {}".format(finetuning_args.reward_model))
return None
else:
reward_model_args = ModelArguments.copyfrom(
model_args,
model_name_or_path=finetuning_args.reward_model,
adapter_name_or_path=finetuning_args.reward_model_adapters,
quantization_bit=finetuning_args.reward_model_quantization_bit,
)
reward_finetuning_args = FinetuningArguments()
tokenizer = load_tokenizer(reward_model_args)["tokenizer"]
reward_model = load_model(
tokenizer, reward_model_args, reward_finetuning_args, is_trainable=False, add_valuehead=True
)
logger.info("Loaded full weights of reward model from {}".format(finetuning_args.reward_model))
logger.warning("Please ensure the ppo model and reward model share SAME tokenizer and vocabulary.")
return reward_model
def convert_pissa_adapter(
output_dir: str,
state_dict: Dict[str, "torch.Tensor"],
accelerator: "Accelerator",
model: "PreTrainedModel",
training_args: "Seq2SeqTrainingArguments",
) -> None:
r"""
Converts the PiSSA adapter to a LoRA adapter.
"""
pissa_init_dir = os.path.join(training_args.output_dir, "pissa_init")
pissa_backup_dir = os.path.join(output_dir, "pissa_backup")
if output_dir == pissa_init_dir:
logger.info("Initial PiSSA adatper will be saved at: {}.".format(pissa_init_dir))
unwrapped_model = accelerator.unwrap_model(model)
if isinstance(unwrapped_model, PeftModel):
init_lora_weights = getattr(unwrapped_model.peft_config["default"], "init_lora_weights")
setattr(unwrapped_model.peft_config["default"], "init_lora_weights", True)
unwrapped_model.save_pretrained(
output_dir,
state_dict=state_dict,
safe_serialization=training_args.save_safetensors,
)
setattr(unwrapped_model.peft_config["default"], "init_lora_weights", init_lora_weights)
elif output_dir == training_args.output_dir: # at the end of training
logger.info("Converted PiSSA adapter will be saved at: {}.".format(output_dir))
unwrapped_model = accelerator.unwrap_model(model)
if isinstance(unwrapped_model, PeftModel): # backup the pissa adapter for further use
unwrapped_model.save_pretrained(
pissa_backup_dir,
state_dict=state_dict,
safe_serialization=training_args.save_safetensors,
)
unwrapped_model.save_pretrained(
output_dir,
state_dict=state_dict,
safe_serialization=training_args.save_safetensors,
convert_pissa_to_lora=pissa_init_dir,
)
            # TODO: the PiSSA adapter is unexpectedly applied to the model again here
unwrapped_model.load_adapter(pissa_backup_dir, "default", is_trainable=True)
unwrapped_model.set_adapter("default")
def _get_decay_parameter_names(model: "PreTrainedModel") -> List[str]:
r"""
Returns a list of names of parameters with weight decay. (weights in non-layernorm layers)
"""
decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
return decay_parameters
def _create_galore_optimizer(
model: "PreTrainedModel",
training_args: "Seq2SeqTrainingArguments",
finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
if len(finetuning_args.galore_target) == 1 and finetuning_args.galore_target[0] == "all":
galore_targets = find_all_linear_modules(model, finetuning_args.freeze_vision_tower)
else:
galore_targets = finetuning_args.galore_target
galore_params: List["torch.nn.Parameter"] = []
for name, module in model.named_modules():
if isinstance(module, torch.nn.Linear) and any(target in name for target in galore_targets):
for param in module.parameters():
if param.requires_grad and len(param.shape) > 1:
galore_params.append(param)
galore_kwargs = {
"rank": finetuning_args.galore_rank,
"update_proj_gap": finetuning_args.galore_update_interval,
"scale": finetuning_args.galore_scale,
"proj_type": finetuning_args.galore_proj_type,
}
id_galore_params = {id(param) for param in galore_params}
decay_params, nodecay_params = [], [] # they are non-galore parameters
trainable_params: List["torch.nn.Parameter"] = [] # galore_params + decay_params + nodecay_params
decay_param_names = _get_decay_parameter_names(model)
for name, param in model.named_parameters():
if param.requires_grad:
trainable_params.append(param)
if id(param) not in id_galore_params:
if name in decay_param_names:
decay_params.append(param)
else:
nodecay_params.append(param)
_, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
if training_args.optim == "adamw_torch":
optim_class = GaLoreAdamW
elif training_args.optim in ["adamw_bnb_8bit", "adamw_8bit", "paged_adamw_8bit"]:
optim_class = GaLoreAdamW8bit
elif training_args.optim == "adafactor":
optim_class = GaLoreAdafactor
else:
raise NotImplementedError("Unknow optim: {}".format(training_args.optim))
if finetuning_args.galore_layerwise:
if training_args.gradient_accumulation_steps != 1:
raise ValueError("Per-layer GaLore does not support gradient accumulation.")
optimizer_dict: Dict["torch.Tensor", "torch.optim.Optimizer"] = {}
for param in nodecay_params:
param_groups = [dict(params=[param], weight_decay=0.0)]
optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
for param in decay_params:
param_groups = [dict(params=[param], weight_decay=training_args.weight_decay)]
optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
for param in galore_params: # galore params have weight decay
param_groups = [dict(params=[param], weight_decay=training_args.weight_decay, **galore_kwargs)]
optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
def optimizer_hook(param: "torch.nn.Parameter"):
if param.grad is not None:
optimizer_dict[param].step()
optimizer_dict[param].zero_grad()
for param in trainable_params:
param.register_post_accumulate_grad_hook(optimizer_hook)
optimizer = DummyOptimizer(lr=training_args.learning_rate, optimizer_dict=optimizer_dict)
else:
param_groups = [
dict(params=nodecay_params, weight_decay=0.0),
dict(params=decay_params, weight_decay=training_args.weight_decay),
dict(params=galore_params, weight_decay=training_args.weight_decay, **galore_kwargs),
]
optimizer = optim_class(param_groups, **optim_kwargs)
logger.info("Using GaLore optimizer, may cause hanging at the start of training, wait patiently.")
return optimizer
def _create_loraplus_optimizer(
model: "PreTrainedModel",
training_args: "Seq2SeqTrainingArguments",
finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
default_lr = training_args.learning_rate
loraplus_lr = training_args.learning_rate * finetuning_args.loraplus_lr_ratio
embedding_lr = finetuning_args.loraplus_lr_embedding
decay_param_names = _get_decay_parameter_names(model)
param_dict: Dict[str, List["torch.nn.Parameter"]] = {
"lora_a": [],
"lora_b": [],
"lora_b_nodecay": [],
"embedding": [],
}
for name, param in model.named_parameters():
if param.requires_grad:
if "lora_embedding_B" in name:
param_dict["embedding"].append(param)
elif "lora_B" in name or param.ndim == 1:
if name in decay_param_names:
param_dict["lora_b"].append(param)
else:
param_dict["lora_b_nodecay"].append(param)
else:
param_dict["lora_a"].append(param)
optim_class, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
param_groups = [
dict(params=param_dict["lora_a"], lr=default_lr, weight_decay=training_args.weight_decay),
dict(params=param_dict["lora_b"], lr=loraplus_lr, weight_decay=training_args.weight_decay),
dict(params=param_dict["lora_b_nodecay"], lr=loraplus_lr, weight_decay=0.0),
dict(params=param_dict["embedding"], lr=embedding_lr, weight_decay=training_args.weight_decay),
]
optimizer = optim_class(param_groups, **optim_kwargs)
logger.info("Using LoRA+ optimizer with loraplus lr ratio {:.2f}.".format(finetuning_args.loraplus_lr_ratio))
return optimizer
def _create_badam_optimizer(
model: "PreTrainedModel",
training_args: "Seq2SeqTrainingArguments",
finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
decay_params, nodecay_params = [], []
decay_param_names = _get_decay_parameter_names(model)
for name, param in model.named_parameters():
if param.requires_grad:
if name in decay_param_names:
decay_params.append(param)
else:
nodecay_params.append(param)
optim_class, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
param_groups = [
dict(params=nodecay_params, weight_decay=0.0),
dict(params=decay_params, weight_decay=training_args.weight_decay),
]
if finetuning_args.badam_mode == "layer":
from badam import BlockOptimizer
base_optimizer = optim_class(param_groups, **optim_kwargs)
optimizer = BlockOptimizer(
base_optimizer=base_optimizer,
named_parameters_list=list(model.named_parameters()),
block_prefix_list=None,
switch_block_every=finetuning_args.badam_switch_interval,
start_block=finetuning_args.badam_start_block,
switch_mode=finetuning_args.badam_switch_mode,
verbose=finetuning_args.badam_verbose,
ds_zero3_enabled=is_deepspeed_zero3_enabled(),
)
logger.info(
f"Using BAdam optimizer with layer-wise update, switch mode is {finetuning_args.badam_switch_mode}, "
f"switch block every {finetuning_args.badam_switch_interval} steps, "
f"default start block is {finetuning_args.badam_start_block}"
)
elif finetuning_args.badam_mode == "ratio":
from badam import BlockOptimizerRatio
assert finetuning_args.badam_update_ratio > 1e-6
optimizer = BlockOptimizerRatio(
param_groups=param_groups,
named_parameters_list=list(model.named_parameters()),
update_ratio=finetuning_args.badam_update_ratio,
mask_mode=finetuning_args.badam_mask_mode,
verbose=finetuning_args.badam_verbose,
include_embedding=False,
**optim_kwargs,
)
logger.info(
f"Using BAdam optimizer with ratio-based update, update ratio is {finetuning_args.badam_update_ratio}, "
f"mask mode is {finetuning_args.badam_mask_mode}"
)
return optimizer
def create_custom_optimzer(
model: "PreTrainedModel",
training_args: "Seq2SeqTrainingArguments",
finetuning_args: "FinetuningArguments",
) -> Optional["torch.optim.Optimizer"]:
if finetuning_args.use_galore:
return _create_galore_optimizer(model, training_args, finetuning_args)
if finetuning_args.loraplus_lr_ratio is not None:
return _create_loraplus_optimizer(model, training_args, finetuning_args)
if finetuning_args.use_badam:
return _create_badam_optimizer(model, training_args, finetuning_args)
def create_custom_scheduler(
training_args: "Seq2SeqTrainingArguments",
num_training_steps: int,
optimizer: Optional["torch.optim.Optimizer"] = None,
) -> None:
if optimizer is not None and isinstance(optimizer, DummyOptimizer):
optimizer_dict = optimizer.optimizer_dict
scheduler_dict: Dict["torch.nn.Parameter", "torch.optim.lr_scheduler.LRScheduler"] = {}
for param in optimizer_dict.keys():
scheduler_dict[param] = get_scheduler(
training_args.lr_scheduler_type,
optimizer=optimizer_dict[param],
num_warmup_steps=training_args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
scheduler_specific_kwargs=training_args.lr_scheduler_kwargs,
)
def scheduler_hook(param: "torch.nn.Parameter"):
scheduler_dict[param].step()
for param in optimizer_dict.keys():
param.register_post_accumulate_grad_hook(scheduler_hook)
def get_batch_logps(
logits: "torch.Tensor", labels: "torch.Tensor", label_pad_token_id: int = IGNORE_INDEX
) -> Tuple["torch.Tensor", "torch.Tensor"]:
r"""
Computes the log probabilities of the given labels under the given logits.
Returns:
logps: A tensor of shape (batch_size,) containing the sum of log probabilities.
valid_length: A tensor of shape (batch_size,) containing the number of non-masked tokens.
"""
if logits.shape[:-1] != labels.shape:
raise ValueError("Logits (batchsize x seqlen) and labels must have the same shape.")
labels = labels[:, 1:].clone()
logits = logits[:, :-1, :]
loss_mask = labels != label_pad_token_id
labels[labels == label_pad_token_id] = 0 # dummy token
per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
return (per_token_logps * loss_mask).sum(-1), loss_mask.sum(-1)
| LLaMA-Factory/src/llamafactory/train/trainer_utils.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/train/trainer_utils.py",
"repo_id": "LLaMA-Factory",
"token_count": 8689
} | 9 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict
from .chatter import WebChatModel
from .common import load_config
from .locales import LOCALES
from .manager import Manager
from .runner import Runner
from .utils import create_ds_config, get_time
if TYPE_CHECKING:
from gradio.components import Component
class Engine:
def __init__(self, demo_mode: bool = False, pure_chat: bool = False) -> None:
self.demo_mode = demo_mode
self.pure_chat = pure_chat
self.manager = Manager()
self.runner = Runner(self.manager, demo_mode)
self.chatter = WebChatModel(self.manager, demo_mode, lazy_init=(not pure_chat))
if not demo_mode:
create_ds_config()
def _update_component(self, input_dict: Dict[str, Dict[str, Any]]) -> Dict["Component", "Component"]:
r"""
Gets the dict to update the components.
"""
output_dict: Dict["Component", "Component"] = {}
for elem_id, elem_attr in input_dict.items():
elem = self.manager.get_elem_by_id(elem_id)
output_dict[elem] = elem.__class__(**elem_attr)
return output_dict
def resume(self):
user_config = load_config() if not self.demo_mode else {}
lang = user_config.get("lang", None) or "en"
init_dict = {"top.lang": {"value": lang}, "infer.chat_box": {"visible": self.chatter.loaded}}
if not self.pure_chat:
current_time = get_time()
init_dict["train.current_time"] = {"value": current_time}
init_dict["train.output_dir"] = {"value": "train_{}".format(current_time)}
init_dict["train.config_path"] = {"value": "{}.yaml".format(current_time)}
init_dict["eval.output_dir"] = {"value": "eval_{}".format(current_time)}
init_dict["infer.image_box"] = {"visible": False}
if user_config.get("last_model", None):
init_dict["top.model_name"] = {"value": user_config["last_model"]}
yield self._update_component(init_dict)
if self.runner.running and not self.demo_mode and not self.pure_chat:
yield {elem: elem.__class__(value=value) for elem, value in self.runner.running_data.items()}
if self.runner.do_train:
yield self._update_component({"train.resume_btn": {"value": True}})
else:
yield self._update_component({"eval.resume_btn": {"value": True}})
def change_lang(self, lang: str):
return {
elem: elem.__class__(**LOCALES[elem_name][lang])
for elem_name, elem in self.manager.get_elem_iter()
if elem_name in LOCALES
}
| LLaMA-Factory/src/llamafactory/webui/engine.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/webui/engine.py",
"repo_id": "LLaMA-Factory",
"token_count": 1303
} | 10 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from llamafactory.hparams import get_infer_args, get_train_args
from llamafactory.model import load_model, load_tokenizer
TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
TRAIN_ARGS = {
"model_name_or_path": TINY_LLAMA,
"stage": "sft",
"do_train": True,
"finetuning_type": "full",
"dataset": "llamafactory/tiny-supervised-dataset",
"dataset_dir": "ONLINE",
"template": "llama3",
"cutoff_len": 1024,
"overwrite_cache": True,
"output_dir": "dummy_dir",
"overwrite_output_dir": True,
"fp16": True,
}
INFER_ARGS = {
"model_name_or_path": TINY_LLAMA,
"finetuning_type": "full",
"template": "llama3",
"infer_dtype": "float16",
}
def test_full_train():
model_args, _, _, finetuning_args, _ = get_train_args(TRAIN_ARGS)
tokenizer_module = load_tokenizer(model_args)
model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
for param in model.parameters():
assert param.requires_grad is True
assert param.dtype == torch.float32
def test_full_inference():
model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
tokenizer_module = load_tokenizer(model_args)
model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
for param in model.parameters():
assert param.requires_grad is False
assert param.dtype == torch.float16
| LLaMA-Factory/tests/model/test_full.py/0 | {
"file_path": "LLaMA-Factory/tests/model/test_full.py",
"repo_id": "LLaMA-Factory",
"token_count": 774
} | 11 |
#!/bin/bash
abort(){
echo "Run unittest failed" 1>&2
echo "Please check your code" 1>&2
echo " 1. you can run unit tests by 'bash .travis/unittest.sh' locally" 1>&2
echo " 2. you can add python requirements in .travis/requirements.txt if you use new requirements in unit tests" 1>&2
exit 1
}
unittest(){
if [ $? != 0 ]; then
exit 1
fi
find "./ppdet" -name 'tests' -type d -print0 | \
xargs -0 -I{} -n1 bash -c \
'python -m unittest discover -v -s {}'
}
trap 'abort' 0
set -e
# install travis python dependencies exclude pycocotools
if [ -f ".travis/requirements.txt" ]; then
pip install -r .travis/requirements.txt
fi
# install pycocotools
if [ `pip list | grep pycocotools | wc -l` -eq 0 ]; then
# install git if needed
    if [ -z "$(which git)" ]; then
apt-get update
apt-get install -y git
fi;
git clone https://github.com/cocodataset/cocoapi.git
cd cocoapi/PythonAPI
make install
python setup.py install --user
cd ../..
rm -rf cocoapi
fi
export PYTHONPATH=`pwd`:$PYTHONPATH
unittest .
trap : 0
| PaddleDetection/.travis/unittest.sh/0 | {
"file_path": "PaddleDetection/.travis/unittest.sh",
"repo_id": "PaddleDetection",
"token_count": 450
} | 12 |
_BASE_: [
'centernet_r50_140e_coco.yml'
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV3_large_x1_0_ssld_pretrained.pdparams
weights: output/centernet_mbv3_large_140e_coco/model_final
CenterNet:
backbone: MobileNetV3
neck: CenterNetDLAFPN
head: CenterNetHead
post_process: CenterNetPostProcess
MobileNetV3:
model_name: large
scale: 1.
with_extra_blocks: false
extra_block_filters: []
feature_maps: [4, 7, 13, 16]
TrainReader:
batch_size: 32
| PaddleDetection/configs/centernet/centernet_mbv3_large_140e_coco.yml/0 | {
"file_path": "PaddleDetection/configs/centernet/centernet_mbv3_large_140e_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 205
} | 13 |
metric: RBOX
num_classes: 15
TrainDataset:
!COCODataSet
image_dir: trainval1024/images
anno_path: trainval1024/DOTA_trainval1024.json
dataset_dir: dataset/dota/
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd', 'gt_poly']
EvalDataset:
!COCODataSet
image_dir: trainval1024/images
anno_path: trainval1024/DOTA_trainval1024.json
dataset_dir: dataset/dota/
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd', 'gt_poly']
TestDataset:
!ImageFolder
anno_path: test1024/DOTA_test1024.json
dataset_dir: dataset/dota/
| PaddleDetection/configs/datasets/dota.yml/0 | {
"file_path": "PaddleDetection/configs/datasets/dota.yml",
"repo_id": "PaddleDetection",
"token_count": 247
} | 14 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'../faster_rcnn/_base_/optimizer_1x.yml',
'../faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',
'../faster_rcnn/_base_/faster_fpn_reader.yml',
]
weights: output/faster_rcnn_dcn_r50_fpn_1x_coco/model_final
ResNet:
depth: 50
norm_type: bn
freeze_at: 0
return_idx: [0,1,2,3]
num_stages: 4
dcn_v2_stages: [1,2,3]
| PaddleDetection/configs/dcn/faster_rcnn_dcn_r50_fpn_1x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/dcn/faster_rcnn_dcn_r50_fpn_1x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 205
} | 15 |
worker_num: 2
TrainReader:
inputs_def:
num_max_boxes: 90
sample_transforms:
- Decode: {}
- RandomDistort: {brightness: [0.5, 1.125, 0.875], random_apply: False}
- RandomExpand: {fill_value: [123.675, 116.28, 103.53]}
- RandomFlip: {}
- CropWithDataAchorSampling: {
anchor_sampler: [[1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2, 0.0]],
batch_sampler: [
[1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
],
target_size: 640}
- Resize: {target_size: [640, 640], keep_ratio: False, interp: 1}
- NormalizeBox: {}
- PadBox: {num_max_boxes: 90}
batch_transforms:
- NormalizeImage: {mean: [123, 117, 104], std: [127.502231, 127.502231, 127.502231], is_scale: false}
- Permute: {}
batch_size: 8
shuffle: true
drop_last: true
EvalReader:
sample_transforms:
- Decode: {}
- NormalizeImage: {mean: [123, 117, 104], std: [127.502231, 127.502231, 127.502231], is_scale: false}
- Permute: {}
batch_size: 1
TestReader:
sample_transforms:
- Decode: {}
- NormalizeImage: {mean: [123, 117, 104], std: [127.502231, 127.502231, 127.502231], is_scale: false}
- Permute: {}
batch_size: 1
| PaddleDetection/configs/face_detection/_base_/face_reader.yml/0 | {
"file_path": "PaddleDetection/configs/face_detection/_base_/face_reader.yml",
"repo_id": "PaddleDetection",
"token_count": 744
} | 16 |
_BASE_: [
'faster_rcnn_r50_fpn_1x_coco.yml',
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNeXt101_vd_64x4d_pretrained.pdparams
weights: output/faster_rcnn_x101_vd_64x4d_fpn_2x_coco/model_final
ResNet:
# for ResNeXt: groups, base_width, base_channels
depth: 101
groups: 64
base_width: 4
variant: d
norm_type: bn
freeze_at: 0
return_idx: [0,1,2,3]
num_stages: 4
epoch: 24
LearningRate:
base_lr: 0.01
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [16, 22]
- !LinearWarmup
start_factor: 0.1
steps: 1000
| PaddleDetection/configs/faster_rcnn/faster_rcnn_x101_vd_64x4d_fpn_2x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/faster_rcnn/faster_rcnn_x101_vd_64x4d_fpn_2x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 277
} | 17 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'_base_/optimizer_1x.yml',
'_base_/gfl_reader.yml',
]
weights: output/gfl_r18vd_1x_coco/model_final
find_unused_parameters: True
architecture: GFL
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet18_vd_pretrained.pdparams
GFL:
backbone: ResNet
neck: FPN
head: LDGFLHead
ResNet:
depth: 18
variant: d
norm_type: bn
freeze_at: 0
return_idx: [1,2,3]
num_stages: 4
FPN:
out_channel: 256
spatial_scales: [0.125, 0.0625, 0.03125]
extra_stage: 2
has_extra_convs: true
use_c5: false
LDGFLHead: # new head
conv_feat:
name: FCOSFeat
feat_in: 256
feat_out: 256
num_convs: 4
norm_type: "gn"
use_dcn: false
fpn_stride: [8, 16, 32, 64, 128]
prior_prob: 0.01
reg_max: 16
loss_class:
name: QualityFocalLoss
use_sigmoid: True
beta: 2.0
loss_weight: 1.0
loss_dfl:
name: DistributionFocalLoss
loss_weight: 0.25
loss_bbox:
name: GIoULoss
loss_weight: 2.0
loss_ld:
name: KnowledgeDistillationKLDivLoss
loss_weight: 0.25
T: 10
loss_ld_vlr:
name: KnowledgeDistillationKLDivLoss
loss_weight: 0.25
T: 10
loss_kd:
name: KnowledgeDistillationKLDivLoss
loss_weight: 10
T: 2
nms:
name: MultiClassNMS
nms_top_k: 1000
keep_top_k: 100
score_threshold: 0.025
nms_threshold: 0.6
| PaddleDetection/configs/gfl/gfl_slim_ld_r18vd_1x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/gfl/gfl_slim_ld_r18vd_1x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 676
} | 18 |
architecture: FasterRCNN
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/HRNet_W18_C_pretrained.pdparams
FasterRCNN:
backbone: HRNet
neck: HRFPN
rpn_head: RPNHead
bbox_head: BBoxHead
# post process
bbox_post_process: BBoxPostProcess
HRNet:
width: 18
freeze_at: 0
return_idx: [0, 1, 2, 3]
HRFPN:
out_channel: 256
share_conv: false
RPNHead:
anchor_generator:
aspect_ratios: [0.5, 1.0, 2.0]
anchor_sizes: [[32], [64], [128], [256], [512]]
strides: [4, 8, 16, 32, 64]
rpn_target_assign:
batch_size_per_im: 256
fg_fraction: 0.5
negative_overlap: 0.3
positive_overlap: 0.7
use_random: True
train_proposal:
min_size: 0.0
nms_thresh: 0.7
pre_nms_top_n: 2000
post_nms_top_n: 2000
topk_after_collect: True
test_proposal:
min_size: 0.0
nms_thresh: 0.7
pre_nms_top_n: 1000
post_nms_top_n: 1000
BBoxHead:
head: TwoFCHead
roi_extractor:
resolution: 7
sampling_ratio: 0
aligned: True
bbox_assigner: BBoxAssigner
BBoxAssigner:
batch_size_per_im: 512
bg_thresh: 0.5
fg_thresh: 0.5
fg_fraction: 0.25
use_random: True
TwoFCHead:
out_channel: 1024
BBoxPostProcess:
decode: RCNNBox
nms:
name: MultiClassNMS
keep_top_k: 100
score_threshold: 0.05
nms_threshold: 0.5
| PaddleDetection/configs/hrnet/_base_/faster_rcnn_hrnetv2p_w18.yml/0 | {
"file_path": "PaddleDetection/configs/hrnet/_base_/faster_rcnn_hrnetv2p_w18.yml",
"repo_id": "PaddleDetection",
"token_count": 631
} | 19 |
English | [简体中文](README.md)
# MOT (Multi-Object Tracking)
## Table of Contents
- [Introduction](#Introduction)
- [Installation](#Installation)
- [Model Zoo](#Model_Zoo)
- [Dataset Preparation](#Dataset_Preparation)
- [Citations](#Citations)
## Introduction
Mainstream 'tracking-by-detection' multi-object tracking (MOT) algorithms are composed of two parts: detection and embedding. Detection locates the potential targets in each frame of the video, while embedding assigns each detected target to its corresponding track and keeps that track updated (the ReID task). Depending on how these two parts are implemented, algorithms fall into the **SDE** series and the **JDE** series.
- **SDE** (Separate Detection and Embedding) algorithms completely separate detection from embedding; the most representative one is **DeepSORT**. This design lets the system work with any detector without modification, and each part can be improved independently. However, because the two stages run in series, the pipeline is slow, and this latency is a major challenge when building a real-time MOT system.
- **JDE** (Joint Detection and Embedding) algorithms learn detection and embedding simultaneously in a shared neural network, with the loss defined in a multi-task learning manner. The representative algorithms are **JDE** and **FairMOT**. This design can achieve high-precision real-time MOT performance.
PaddleDetection implements three MOT algorithms from these two series: [DeepSORT](https://arxiv.org/abs/1812.00442) from the SDE series, and [JDE](https://arxiv.org/abs/1909.12605) and [FairMOT](https://arxiv.org/abs/2004.01888) from the JDE series.
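To make the SDE paradigm concrete, here is a minimal sketch of one tracking-by-detection step, assuming that bounding boxes and appearance embeddings come from two separate, unspecified models. Every name in it (`cosine_sim`, `associate`, the 128-dimensional embeddings) is an illustrative assumption rather than the PaddleDetection or DeepSORT API; the real DeepSORT pipeline additionally applies a Kalman filter and motion-based gating before appearance matching.
```python
# Minimal SDE-style association sketch (illustrative only, not the PaddleDetection API).
import numpy as np

def cosine_sim(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine similarity between two embedding vectors."""
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

def associate(track_embs: dict, det_embs: list, threshold: float = 0.5) -> list:
    """Greedily match each detection embedding to the most similar existing track."""
    assignments = []
    for det_idx, det_emb in enumerate(det_embs):
        best_tid, best_sim = None, threshold
        for tid, trk_emb in track_embs.items():
            sim = cosine_sim(trk_emb, det_emb)
            if sim > best_sim:
                best_tid, best_sim = tid, sim
        assignments.append((det_idx, best_tid))  # None means "start a new track"
    return assignments

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    tracks = {0: rng.normal(size=128), 1: rng.normal(size=128)}  # existing track embeddings
    detections = [tracks[1] + 0.05 * rng.normal(size=128),       # looks like track 1
                  rng.normal(size=128)]                          # unmatched detection
    print(associate(tracks, detections))  # expected: [(0, 1), (1, None)]
```
In a JDE-series model such as FairMOT, the detector and the embedding head would instead share one backbone and produce both outputs in a single forward pass.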
### PP-Tracking real-time MOT system
In addition, PaddleDetection provides the [PP-Tracking](../../deploy/pptracking/README.md) real-time multi-object tracking system.
PP-Tracking is the first open-source real-time multi-object tracking system built on the PaddlePaddle deep learning framework. It offers rich models, broad applicability and efficient deployment.
PP-Tracking supports two paradigms: single-camera tracking (MOT) and multi-camera tracking (MTMCT). Targeting the difficulties and pain points of real-world applications, PP-Tracking provides MOT functions and applications such as pedestrian tracking, vehicle tracking, multi-class tracking, small-object tracking, traffic statistics and multi-camera tracking. Deployment is available through both an API and a GUI, in Python and C++, on platforms such as Linux and NVIDIA Jetson.
### AI studio public project tutorial
PP-Tracking provides a public AI Studio project tutorial. Please refer to this [tutorial](https://aistudio.baidu.com/aistudio/projectdetail/3022582).
### Python predict and deployment
PP-Tracking supports prediction and deployment with Python. Please refer to this [doc](../../deploy/pptracking/python/README.md).
### C++ predict and deployment
PP-Tracking supports prediction and deployment with C++. Please refer to this [doc](../../deploy/pptracking/cpp/README.md).
### GUI predict and deployment
PP-Tracking supports prediction and deployment through a GUI. Please refer to this [doc](https://github.com/yangyudong2020/PP-Tracking_GUi).
<div width="1000" align="center">
<img src="../../docs/images/pptracking_en.png"/>
</div>
<div width="1000" align="center">
<img src="https://user-images.githubusercontent.com/22989727/205546999-f847183d-73e5-4abe-9896-ce6a245efc79.gif"/>
<br>
video source: VisDrone, BDD100K datasets</div>
</div>
## Installation
Install all the related dependencies for MOT:
```
pip install lap motmetrics sklearn
or
pip install -r requirements.txt
```
**Notes:**
- Please make sure that [ffmpeg](https://ffmpeg.org/ffmpeg.html) is installed first. On the Linux (Ubuntu) platform you can install it directly with the following command: `apt-get update && apt-get install -y ffmpeg`.
## Model Zoo
- Base models
- [ByteTrack](bytetrack/README.md)
- [OC-SORT](ocsort/README.md)
- [BoT-SORT](botsort/README.md)
- [DeepSORT](deepsort/README.md)
- [JDE](jde/README.md)
- [FairMOT](fairmot/README.md)
- [CenterTrack](centertrack/README.md)
- Feature models
- [Pedestrian](pedestrian/README.md)
- [Head](headtracking21/README.md)
- [Vehicle](vehicle/README.md)
- Multi-Class Tracking
- [MCFairMOT](mcfairmot/README.md)
- Multi-Target Multi-Camera Tracking
- [MTMCT](mtmct/README.md)
## Dataset Preparation
### MOT Dataset
PaddleDetection implements [JDE](https://github.com/Zhongdao/Towards-Realtime-MOT) and [FairMOT](https://github.com/ifzhang/FairMOT) and uses the same training data as they do, named 'MIX', including **Caltech Pedestrian, CityPersons, CUHK-SYSU, PRW, ETHZ, MOT17 and MOT16**. The former six are used as the mixed dataset for training, and MOT16 is used as the evaluation dataset. If you want to use these datasets, please **follow their licenses**.
**Notes:**
- Multi-object tracking (MOT) datasets are always used for single-category tracking. DeepSORT, JDE and FairMOT are single-category MOT models. The 'MIX' dataset and its sub-datasets are also single-category pedestrian tracking datasets; they can be regarded as detection datasets with additional identity (ID) ground truth.
- In order to train feature models for more scenes, more datasets are also processed into the same format as the MIX dataset. The PaddleDetection team also provides feature datasets and models for [vehicle tracking](vehicle/README.md), [head tracking](headtracking21/README.md) and more general [pedestrian tracking](pedestrian/README.md). User-defined datasets can also be prepared by referring to the data preparation [doc](../../docs/tutorials/data/PrepareMOTDataSet.md).
- The multi-class MOT model is [MCFairMOT](mcfairmot/readme_cn.md), and the multi-class dataset is the integrated version of the VisDrone dataset. Please refer to the doc of [MCFairMOT](mcfairmot/README.md).
- The Multi-Target Multi-Camera Tracking (MTMCT) model is trained on the [AIC21 MTMCT](https://www.aicitychallenge.org) (CityFlow) Multi-Camera Vehicle Tracking dataset. The dataset and model can be found in the doc of [MTMCT](mtmct/README.md).
### Dataset Directory
First, download image_lists.zip using the following command, and unzip it into `PaddleDetection/dataset/mot`:
```
wget https://bj.bcebos.com/v1/paddledet/data/mot/image_lists.zip
```
Then, download the MIX dataset using the following command, and unzip them into `PaddleDetection/dataset/mot`:
```
wget https://bj.bcebos.com/v1/paddledet/data/mot/MOT17.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/Caltech.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/CUHKSYSU.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/PRW.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/Cityscapes.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/ETHZ.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/MOT16.zip
```
The final directory is:
```
dataset/mot
|——————image_lists
|——————caltech.10k.val
|——————caltech.all
|——————caltech.train
|——————caltech.val
|——————citypersons.train
|——————citypersons.val
|——————cuhksysu.train
|——————cuhksysu.val
|——————eth.train
|——————mot16.train
|——————mot17.train
|——————prw.train
|——————prw.val
|——————Caltech
|——————Cityscapes
|——————CUHKSYSU
|——————ETHZ
|——————MOT16
|——————MOT17
|——————PRW
```
### Data Format
These datasets all share the following directory structure:
```
MOT17
|——————images
| └——————train
| └——————test
└——————labels_with_ids
└——————train
```
Annotations of these datasets are provided in a unified format. Every image has a corresponding annotation text. Given an image path, the annotation text path can be generated by replacing the string `images` with `labels_with_ids` and replacing `.jpg` with `.txt`.
In the annotation text, each line describes one bounding box in the following format (a small parsing sketch is given after the notes below):
```
[class] [identity] [x_center] [y_center] [width] [height]
```
**Notes:**
- `class` is the class id, which starts from `0`; both single-class and multi-class annotations are supported, and for single-class datasets it is always `0`.
- `identity` is an integer from `1` to `num_identities` (`num_identities` is the total number of object instances in all videos or image sequences of the dataset), or `-1` if this box has no identity annotation.
- `[x_center] [y_center] [width] [height]` are the center coordinates, width and height, note that they are normalized by the width/height of the image, so they are floating point numbers ranging from 0 to 1.
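The snippet below is a minimal, hypothetical sketch (not part of PaddleDetection) that follows the rules above: it maps an image path to its annotation path and converts a normalized box back to pixel coordinates. The example path and helper names are only for illustration.
```python
# Minimal sketch: parse a labels_with_ids annotation file.
from pathlib import Path

def label_path_for(image_path: str) -> str:
    # replace 'images' with 'labels_with_ids' and '.jpg' with '.txt'
    return image_path.replace('images', 'labels_with_ids').replace('.jpg', '.txt')

def parse_line(line: str, img_w: int, img_h: int):
    cls, identity, xc, yc, w, h = line.split()
    # denormalize the center/size values to pixel coordinates
    return {
        'class': int(cls),
        'identity': int(identity),  # -1 means no identity annotation
        'bbox_xywh': (float(xc) * img_w, float(yc) * img_h,
                      float(w) * img_w, float(h) * img_h),
    }

if __name__ == '__main__':
    img = 'MOT17/images/train/MOT17-02/img1/000001.jpg'  # hypothetical path
    for line in Path(label_path_for(img)).read_text().splitlines():
        print(parse_line(line, img_w=1920, img_h=1080))
```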
## Citations
```
@inproceedings{Wojke2017simple,
title={Simple Online and Realtime Tracking with a Deep Association Metric},
author={Wojke, Nicolai and Bewley, Alex and Paulus, Dietrich},
booktitle={2017 IEEE International Conference on Image Processing (ICIP)},
year={2017},
pages={3645--3649},
organization={IEEE},
doi={10.1109/ICIP.2017.8296962}
}
@inproceedings{Wojke2018deep,
title={Deep Cosine Metric Learning for Person Re-identification},
author={Wojke, Nicolai and Bewley, Alex},
booktitle={2018 IEEE Winter Conference on Applications of Computer Vision (WACV)},
year={2018},
pages={748--756},
organization={IEEE},
doi={10.1109/WACV.2018.00087}
}
@article{wang2019towards,
title={Towards Real-Time Multi-Object Tracking},
author={Wang, Zhongdao and Zheng, Liang and Liu, Yixuan and Wang, Shengjin},
journal={arXiv preprint arXiv:1909.12605},
year={2019}
}
@article{zhang2020fair,
title={FairMOT: On the Fairness of Detection and Re-Identification in Multiple Object Tracking},
author={Zhang, Yifu and Wang, Chunyu and Wang, Xinggang and Zeng, Wenjun and Liu, Wenyu},
journal={arXiv preprint arXiv:2004.01888},
year={2020}
}
@article{zhang2021bytetrack,
title={ByteTrack: Multi-Object Tracking by Associating Every Detection Box},
author={Zhang, Yifu and Sun, Peize and Jiang, Yi and Yu, Dongdong and Yuan, Zehuan and Luo, Ping and Liu, Wenyu and Wang, Xinggang},
journal={arXiv preprint arXiv:2110.06864},
year={2021}
}
@article{cao2022observation,
title={Observation-Centric SORT: Rethinking SORT for Robust Multi-Object Tracking},
author={Cao, Jinkun and Weng, Xinshuo and Khirodkar, Rawal and Pang, Jiangmiao and Kitani, Kris},
journal={arXiv preprint arXiv:2203.14360},
year={2022}
}
@article{aharon2022bot,
title={BoT-SORT: Robust Associations Multi-Pedestrian Tracking},
author={Aharon, Nir and Orfaig, Roy and Bobrovsky, Ben-Zion},
journal={arXiv preprint arXiv:2206.14651},
year={2022}
}
@article{zhou2020tracking,
title={Tracking Objects as Points},
author={Zhou, Xingyi and Koltun, Vladlen and Kr{\"a}henb{\"u}hl, Philipp},
journal={ECCV},
year={2020}
}
```
| PaddleDetection/configs/mot/README_en.md/0 | {
"file_path": "PaddleDetection/configs/mot/README_en.md",
"repo_id": "PaddleDetection",
"token_count": 3593
} | 20 |
# This config is an assembled config for ByteTrack MOT, used as eval/infer mode for MOT.
_BASE_: [
'detector/yolov3_darknet53_40e_608x608_mot17half.yml',
'_base_/mot17.yml',
'_base_/yolov3_mot_reader_608x608.yml'
]
weights: output/bytetrack_yolov3/model_final
log_iter: 20
snapshot_epoch: 2
metric: MOT # eval/infer mode
num_classes: 1
architecture: ByteTrack
pretrain_weights: https://bj.bcebos.com/v1/paddledet/models/yolov3_darknet53_270e_coco.pdparams
ByteTrack:
detector: YOLOv3 # General YOLOv3 version
reid: None
tracker: JDETracker
det_weights: https://bj.bcebos.com/v1/paddledet/models/mot/yolov3_darknet53_40e_608x608_mot17half.pdparams
reid_weights: None
YOLOv3:
backbone: DarkNet
neck: YOLOv3FPN
yolo_head: YOLOv3Head
post_process: BBoxPostProcess
# Tracking requires higher quality boxes, so NMS score_threshold will be higher
BBoxPostProcess:
decode:
name: YOLOBox
conf_thresh: 0.005
downsample_ratio: 32
clip_bbox: true
nms:
name: MultiClassNMS
keep_top_k: 100
score_threshold: 0.01
nms_threshold: 0.45
nms_top_k: 1000
# BYTETracker
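# Note (descriptive, based on the BYTE association scheme): with use_byte enabled,
# boxes scoring at least conf_thres drive the first matching round; tracks left
# unmatched are then associated with low-score boxes whose score is no lower than
# low_conf_thres, and match_thres is the similarity threshold used for matching.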
JDETracker:
use_byte: True
match_thres: 0.9
conf_thres: 0.2
low_conf_thres: 0.1
min_box_area: 100
vertical_ratio: 1.6 # for pedestrian
| PaddleDetection/configs/mot/bytetrack/bytetrack_yolov3.yml/0 | {
"file_path": "PaddleDetection/configs/mot/bytetrack/bytetrack_yolov3.yml",
"repo_id": "PaddleDetection",
"token_count": 547
} | 21 |
architecture: JDE
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/DarkNet53_pretrained.pdparams
find_unused_parameters: True
JDE:
detector: YOLOv3
reid: JDEEmbeddingHead
tracker: JDETracker
YOLOv3:
backbone: DarkNet
neck: YOLOv3FPN
yolo_head: YOLOv3Head
post_process: JDEBBoxPostProcess
for_mot: True
DarkNet:
depth: 53
return_idx: [2, 3, 4]
freeze_norm: True
YOLOv3FPN:
freeze_norm: True
YOLOv3Head:
anchors: [[128,384], [180,540], [256,640], [512,640],
[32,96], [45,135], [64,192], [90,271],
[8,24], [11,34], [16,48], [23,68]]
anchor_masks: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
loss: JDEDetectionLoss
JDEBBoxPostProcess:
decode:
name: JDEBox
conf_thresh: 0.3
downsample_ratio: 32
nms:
name: MultiClassNMS
keep_top_k: 500
score_threshold: 0.01
nms_threshold: 0.5
nms_top_k: 2000
normalized: true
JDEEmbeddingHead:
anchor_levels: 3
anchor_scales: 4
embedding_dim: 512
emb_loss: JDEEmbeddingLoss
jde_loss: JDELoss
JDETracker:
det_thresh: 0.3
track_buffer: 30
min_box_area: 200
vertical_ratio: 1.6 # for pedestrian
| PaddleDetection/configs/mot/jde/_base_/jde_darknet53.yml/0 | {
"file_path": "PaddleDetection/configs/mot/jde/_base_/jde_darknet53.yml",
"repo_id": "PaddleDetection",
"token_count": 547
} | 22 |
English | [简体中文](README.md)
# PP-PicoDet

## News
- Released a new series of PP-PicoDet models: **(2022.03.20)**
  - (1) It adopts the TAL/ETA head and an optimized PAN, which greatly improves accuracy;
  - (2) The CPU inference speed is further optimized, and the training speed is greatly improved;
  - (3) The exported model includes post-processing, so prediction directly outputs the final results without secondary development, which lowers the migration cost.
### Legacy Model
- Please refer to: [PicoDet 2021.10](./legacy_model/)
## Introduction
We developed a series of lightweight models, named `PP-PicoDet`. Because of the excellent performance, our models are very suitable for deployment on mobile or CPU. For more details, please refer to our [report on arXiv](https://arxiv.org/abs/2111.00902).
- 🌟 Higher mAP: the **first** object detectors that surpass mAP(0.5:0.95) **30+** within 1M parameters when the input size is 416.
- 🚀 Faster speed: 150 FPS on a mobile ARM CPU.
- 😊 Deploy friendly: support PaddleLite/MNN/NCNN/OpenVINO and provide C++/Python/Android implementation.
- 😍 Advanced algorithm: use the most advanced algorithms and offer innovation, such as ESNet, CSP-PAN, SimOTA with VFL, etc.
<div align="center">
<img src="../../docs/images/picodet_map.png" width='600'/>
</div>
## Benchmark
| Model | Input size | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Params<br><sup>(M) | FLOPS<br><sup>(G) | Latency<sup><small>[CPU](#latency)</small><sup><br><sup>(ms) | Latency<sup><small>[Lite](#latency)</small><sup><br><sup>(ms) | Weight | Log | Config | Inference Model<br>(w/ postprocess) | Inference Model<br>(w/o postprocess) |
| :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | :-----------------------------: | :------: | :------: | :------: | :------: | :------: |
| PicoDet-XS | 320*320 | 23.5 | 36.1 | 0.70 | 0.67 | 3.9ms | 7.81ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_320_coco_lcnet.yml) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_xs_320_coco_lcnet.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_xs_320_coco_lcnet_non_postprocess.tar) |
| PicoDet-XS | 416*416 | 26.2 | 39.3 | 0.70 | 1.13 | 6.1ms | 12.38ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_416_coco_lcnet.yml) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_xs_416_coco_lcnet.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_xs_416_coco_lcnet_non_postprocess.tar) |
| PicoDet-S | 320*320 | 29.1 | 43.4 | 1.18 | 0.97 | 4.8ms | 9.56ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco_lcnet.yml) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_s_320_coco_lcnet.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_s_320_coco_lcnet_non_postprocess.tar) |
| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 6.6ms | 15.20ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco_lcnet.yml) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_s_416_coco_lcnet.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_s_416_coco_lcnet_non_postprocess.tar) |
| PicoDet-M | 320*320 | 34.4 | 50.0 | 3.46 | 2.57 | 8.2ms | 17.68ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco_lcnet.yml) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_m_320_coco_lcnet.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_m_320_coco_lcnet_non_postprocess.tar) |
| PicoDet-M | 416*416 | 37.5 | 53.4 | 3.46 | 4.34 | 12.7ms | 28.39ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco_lcnet.yml) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_m_416_coco_lcnet.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_m_416_coco_lcnet_non_postprocess.tar) |
| PicoDet-L | 320*320 | 36.1 | 52.0 | 5.80 | 4.20 | 11.5ms | 25.21ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco_lcnet.yml) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_l_320_coco_lcnet.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_l_320_coco_lcnet_non_postprocess.tar) |
| PicoDet-L | 416*416 | 39.4 | 55.7 | 5.80 | 7.10 | 20.7ms | 42.23ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco_lcnet.yml) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_l_416_coco_lcnet.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_l_416_coco_lcnet_non_postprocess.tar) |
| PicoDet-L | 640*640 | 42.6 | 59.2 | 5.80 | 16.81 | 62.5ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco_lcnet.yml) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_l_640_coco_lcnet.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_l_640_coco_lcnet_non_postprocess.tar) |
<details open>
<summary><b>Table Notes:</b></summary>
- <a name="latency">Latency:</a> All our models test on `Intel core i7 10750H` CPU with MKLDNN by 12 threads and `Qualcomm Snapdragon 865(4xA77+4xA55)` with 4 threads by arm8 and with FP16. In the above table, test CPU latency on Paddle-Inference and testing Mobile latency with `Lite`->[Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite).
- PicoDet is trained on COCO train2017 dataset and evaluated on COCO val2017. And PicoDet used 4 GPUs for training and all checkpoints are trained with default settings and hyperparameters.
- Benchmark test: when testing the speed benchmark, the post-processing is not included in the exported model; you need to set `-o export.benchmark=True` or manually modify [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml#L12).
</details>
#### Benchmark of Other Models
| Model | Input size | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Params<br><sup>(M) | FLOPS<br><sup>(G) | Latency<sup><small>[NCNN](#latency)</small><sup><br><sup>(ms) |
| :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: |
| YOLOv3-Tiny | 416*416 | 16.6 | 33.1 | 8.86 | 5.62 | 25.42 |
| YOLOv4-Tiny | 416*416 | 21.7 | 40.2 | 6.06 | 6.96 | 23.69 |
| PP-YOLO-Tiny | 320*320 | 20.6 | - | 1.08 | 0.58 | 6.75 |
| PP-YOLO-Tiny | 416*416 | 22.7 | - | 1.08 | 1.02 | 10.48 |
| Nanodet-M | 320*320 | 20.6 | - | 0.95 | 0.72 | 8.71 |
| Nanodet-M | 416*416 | 23.5 | - | 0.95 | 1.2 | 13.35 |
| Nanodet-M 1.5x | 416*416 | 26.8 | - | 2.08 | 2.42 | 15.83 |
| YOLOX-Nano | 416*416 | 25.8 | - | 0.91 | 1.08 | 19.23 |
| YOLOX-Tiny | 416*416 | 32.8 | - | 5.06 | 6.45 | 32.77 |
| YOLOv5n | 640*640 | 28.4 | 46.0 | 1.9 | 4.5 | 40.35 |
| YOLOv5s | 640*640 | 37.2 | 56.0 | 7.2 | 16.5 | 78.05 |
- Testing Mobile latency with code: [MobileDetBenchmark](https://github.com/JiweiMaster/MobileDetBenchmark).
## Quick Start
<details open>
<summary>Requirements:</summary>
- PaddlePaddle >= 2.2.2
</details>
<details>
<summary>Installation</summary>
- [Installation guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/INSTALL.md)
- [Prepare dataset](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/data/PrepareDataSet_en.md)
</details>
<details>
<summary>Training and Evaluation</summary>
- Training model on single-GPU:
```shell
# training on single-GPU
export CUDA_VISIBLE_DEVICES=0
python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval
```
If the GPU is out of memory during training, reduce the batch_size in TrainReader, and reduce the base_lr in LearningRate proportionally. At the same time, the configs we published are all trained with 4 GPUs. If the number of GPUs is changed to 1, the base_lr needs to be reduced by a factor of 4.
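For example, the scaling rule above can be checked with a few lines of Python (a sketch only; the numbers are placeholders, read the real `base_lr` and batch size from your config):
```python
# Linear scaling rule for base_lr when the GPU number or batch size changes.
def scaled_lr(default_lr, default_gpus, default_bs, new_gpus, new_bs):
    return default_lr * (new_gpus * new_bs) / (default_gpus * default_bs)

# Going from the released 4-GPU setting to 1 GPU with the same per-GPU batch
# size divides the learning rate by 4 (placeholder base_lr of 0.32 shown here):
print(scaled_lr(default_lr=0.32, default_gpus=4, default_bs=64, new_gpus=1, new_bs=64))  # 0.08
```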
- Training model on multi-GPU:
```shell
# training on multi-GPU
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval
```
- Evaluation:
```shell
python tools/eval.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
-o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
```
- Infer:
```shell
python tools/infer.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
-o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams
```
Detail also can refer to [Quick start guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/GETTING_STARTED.md).
</details>
## Deployment
### Export and Convert Model
<details open>
<summary>1. Export model</summary>
```shell
cd PaddleDetection
python tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \
-o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams \
--output_dir=output_inference
```
- If post-processing is not required, please specify `-o export.post_process=False` (if `-o` has already been specified, do not repeat it) or manually modify the corresponding fields in [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml).
- If NMS is not required, please specify `-o export.nms=False` or manually modify the corresponding fields in [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml). Many scenarios that use ONNX only support a single input and fixed-shape outputs, so if you export to ONNX, it is recommended not to export NMS.
</details>
<details>
<summary>2. Convert to PaddleLite (click to expand)</summary>
- Install Paddlelite>=2.10:
```shell
pip install paddlelite
```
- Convert model:
```shell
# FP32
paddle_lite_opt --model_dir=output_inference/picodet_s_320_coco_lcnet --valid_targets=arm --optimize_out=picodet_s_320_coco_fp32
# FP16
paddle_lite_opt --model_dir=output_inference/picodet_s_320_coco_lcnet --valid_targets=arm --optimize_out=picodet_s_320_coco_fp16 --enable_fp16=true
```
</details>
<details>
<summary>3. Convert to ONNX (click to expand)</summary>
- Install [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) >= 0.7 and ONNX > 1.10.1, for details, please refer to [Tutorials of Export ONNX Model](../../deploy/EXPORT_ONNX_MODEL.md)
```shell
pip install onnx
pip install paddle2onnx==0.9.2
```
- Convert model:
```shell
paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \
--model_filename model.pdmodel \
--params_filename model.pdiparams \
--opset_version 11 \
--save_file picodet_s_320_coco.onnx
```
- Simplify ONNX model: use onnx-simplifier to simplify onnx model.
- Install onnxsim >= 0.4.1:
```shell
pip install onnxsim
```
- simplify onnx model:
```shell
onnxsim picodet_s_320_coco.onnx picodet_s_processed.onnx
```
</details>
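The reference Python deployment for ONNX is [demo_onnxruntime](../../deploy/third_engine/demo_onnxruntime); the snippet below is only a rough sketch of the calling pattern with ONNX Runtime, assuming the model was exported with post-processing and simplified as above. The exact preprocessing (color order, mean/std, input names) should be taken from the `infer_cfg.yml` generated during export.
```python
# Rough sketch: run the simplified ONNX model with ONNX Runtime on one 320x320 image.
import cv2
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('picodet_s_processed.onnx', providers=['CPUExecutionProvider'])

img = cv2.imread('demo.jpg')  # hypothetical test image
h, w = img.shape[:2]
blob = cv2.resize(img, (320, 320)).astype('float32') / 255.0
blob = (blob - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
blob = blob.transpose(2, 0, 1)[None].astype('float32')  # NCHW

# Build the feed dict from whatever inputs the exported graph declares; models
# exported with post-processing usually also expect a scale_factor input.
feeds = {}
for inp in sess.get_inputs():
    if 'scale' in inp.name:
        feeds[inp.name] = np.array([[320.0 / h, 320.0 / w]], dtype='float32')
    else:
        feeds[inp.name] = blob

outputs = sess.run(None, feeds)
print([o.shape for o in outputs])
```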
- Deploy models
| Model | Input size | ONNX(w/ postprocess) | ONNX(w/o postprocess) | Paddle Lite(fp32) | Paddle Lite(fp16) |
| :-------- | :--------: | :---------------------: | :---------------------: | :----------------: | :----------------: |
| PicoDet-XS | 320*320 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_320_lcnet_postprocessed.onnx) | [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_xs_320_coco_lcnet.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_xs_320_coco_lcnet_fp16.tar) |
| PicoDet-XS | 416*416 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_416_lcnet_postprocessed.onnx) | [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_xs_416_coco_lcnet.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_xs_416_coco_lcnet_fp16.tar) |
| PicoDet-S | 320*320 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_lcnet_postprocessed.onnx) | [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_coco_lcnet.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_coco_lcnet_fp16.tar) |
| PicoDet-S | 416*416 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_lcnet_postprocessed.onnx) | [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_coco_lcnet.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_coco_lcnet_fp16.tar) |
| PicoDet-M | 320*320 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_320_lcnet_postprocessed.onnx) | [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320_coco_lcnet.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320_coco_lcnet_fp16.tar) |
| PicoDet-M | 416*416 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_lcnet_postprocessed.onnx) | [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416_coco_lcnet.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416_coco_lcnet_fp16.tar) |
| PicoDet-L | 320*320 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_320_lcnet_postprocessed.onnx) | [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320_coco_lcnet.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320_coco_lcnet_fp16.tar) |
| PicoDet-L | 416*416 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_416_lcnet_postprocessed.onnx) | [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_coco_lcnet.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_coco_lcnet_fp16.tar) |
| PicoDet-L | 640*640 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_640_lcnet_postprocessed.onnx) | [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_640_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640_coco_lcnet.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640_coco_lcnet_fp16.tar) |
### Deploy
| Infer Engine | Python | C++ | Predict With Postprocess |
| :-------- | :--------: | :---------------------: | :----------------: |
| OpenVINO | [Python](../../deploy/third_engine/demo_openvino/python) | [C++](../../deploy/third_engine/demo_openvino)(postprocess coming soon) | ✔︎ |
| Paddle Lite | - | [C++](../../deploy/lite) | ✔︎ |
| Android Demo | - | [Paddle Lite](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/android/app/cxx/picodet_detection_demo) | ✔︎ |
| PaddleInference | [Python](../../deploy/python) | [C++](../../deploy/cpp) | ✔︎ |
| ONNXRuntime | [Python](../../deploy/third_engine/demo_onnxruntime) | Coming soon | ✔︎ |
| NCNN | Coming soon | [C++](../../deploy/third_engine/demo_ncnn) | ✘ |
| MNN | Coming soon | [C++](../../deploy/third_engine/demo_mnn) | ✘ |
Android demo visualization:
<div align="center">
<img src="../../docs/images/picodet_android_demo1.jpg" height="500px" ><img src="../../docs/images/picodet_android_demo2.jpg" height="500px" ><img src="../../docs/images/picodet_android_demo3.jpg" height="500px" ><img src="../../docs/images/picodet_android_demo4.jpg" height="500px" >
</div>
## Quantization
<details open>
<summary>Requirements:</summary>
- PaddlePaddle >= 2.2.2
- PaddleSlim >= 2.2.2
**Install:**
```shell
pip install paddleslim==2.2.2
```
</details>
<details open>
<summary>Quant aware</summary>
Configure the quant config and start training:
```shell
python tools/train.py -c configs/picodet/picodet_s_416_coco_lcnet.yml \
--slim_config configs/slim/quant/picodet_s_416_lcnet_quant.yml --eval
```
- More detail can refer to [slim document](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim)
</details>
- Quant Aware Model ZOO:
| Quant Model | Input size | mAP<sup>val<br>0.5:0.95 | Config | Slim Config | Weight | Inference Model(w/ postprocess) | Inference Model(w/o postprocess) | Paddle Lite(INT8, w/ postprocess) | Paddle Lite(INT8, w/o postprocess) |
| :-------- | :--------: | :--------------------: | :-------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: | :----------------: |
| PicoDet-S | 416*416 | 31.5 | [config](./picodet_s_416_coco_lcnet.yml) | [slim config](../slim/quant/picodet_s_416_lcnet_quant.yml) | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet_quant.pdparams) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_s_416_coco_lcnet_quant.tar) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/Inference/picodet_s_416_coco_lcnet_quant_non_postprocess.tar) | [w/ postprocess](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_coco_lcnet_quant.nb) | [w/o postprocess](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_coco_lcnet_quant_non_postprocess.nb) |
## Unstructured Pruning
<details open>
<summary>Tutorial:</summary>
Please refer to this [documentation](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/legacy_model/pruner/README.md) for details such as requirements, training and deployment.
</details>
## Application
- **Pedestrian detection:** model zoo of `PicoDet-S-Pedestrian` please refer to [PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B)
- **Mainbody detection:** model zoo of `PicoDet-L-Mainbody` please refer to [mainbody detection](./legacy_model/application/mainbody_detection/README.md)
## FAQ
<details>
<summary>Out of memory error.</summary>
Please reduce the `batch_size` of `TrainReader` in config.
</details>
<details>
<summary>How to transfer learning.</summary>
Please set `pretrain_weights` in the config to a model trained on COCO, for example:
```yaml
pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams
```
</details>
<details>
<summary>The transpose operator is time-consuming on some hardware.</summary>
Please use `PicoDet-LCNet` model, which has fewer `transpose` operators.
</details>
<details>
<summary>How to count model parameters.</summary>
You can insert the code below at [this line](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/engine/trainer.py#L141) to count the learnable parameters.
```python
params = sum([
    p.numel() for n, p in self.model.named_parameters()
if all([x not in n for x in ['_mean', '_variance']])
]) # exclude BatchNorm running status
print('params: ', params)
```
</details>
## Cite PP-PicoDet
If you use PicoDet in your research, please cite our work by using the following BibTeX entry:
```
@misc{yu2021pppicodet,
title={PP-PicoDet: A Better Real-Time Object Detector on Mobile Devices},
author={Guanghua Yu and Qinyao Chang and Wenyu Lv and Chang Xu and Cheng Cui and Wei Ji and Qingqing Dang and Kaipeng Deng and Guanzhong Wang and Yuning Du and Baohua Lai and Qiwen Liu and Xiaoguang Hu and Dianhai Yu and Yanjun Ma},
year={2021},
eprint={2111.00902},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
| PaddleDetection/configs/picodet/README_en.md/0 | {
"file_path": "PaddleDetection/configs/picodet/README_en.md",
"repo_id": "PaddleDetection",
"token_count": 11217
} | 23 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'../ppyoloe/_base_/optimizer_300e.yml',
'../ppyoloe/_base_/ppyoloe_crn.yml',
'../ppyoloe/_base_/ppyoloe_reader.yml',
]
log_iter: 100
snapshot_epoch: 4
weights: output/ppyoloe_crn_l_36e_uadetrac/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams
depth_mult: 1.0
width_mult: 1.0
num_classes: 4
TrainDataset:
!COCODataSet
image_dir: train
anno_path: annotations/train.json
dataset_dir: dataset/uadetrac
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
EvalDataset:
!COCODataSet
image_dir: val
anno_path: annotations/test.json
dataset_dir: dataset/uadetrac
TestDataset:
!ImageFolder
anno_path: annotations/test.json
dataset_dir: dataset/uadetrac
TrainReader:
batch_size: 8
epoch: 36
LearningRate:
base_lr: 0.001
schedulers:
- !CosineDecay
max_epochs: 43
- !LinearWarmup
start_factor: 0.
epochs: 1
PPYOLOEHead:
static_assigner_epoch: -1
nms:
name: MultiClassNMS
nms_top_k: 1000
keep_top_k: 100
score_threshold: 0.01
nms_threshold: 0.6
| PaddleDetection/configs/ppvehicle/ppyoloe_crn_l_36e_uadetrac.yml/0 | {
"file_path": "PaddleDetection/configs/ppvehicle/ppyoloe_crn_l_36e_uadetrac.yml",
"repo_id": "PaddleDetection",
"token_count": 549
} | 24 |
architecture: YOLOv3
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV3_large_x1_0_ssld_pretrained.pdparams
norm_type: sync_bn
use_ema: true
ema_decay: 0.9998
YOLOv3:
backbone: MobileNetV3
neck: PPYOLOFPN
yolo_head: YOLOv3Head
post_process: BBoxPostProcess
MobileNetV3:
model_name: large
scale: 1.
with_extra_blocks: false
extra_block_filters: []
feature_maps: [13, 16]
PPYOLOFPN:
in_channels: [160, 368]
coord_conv: true
conv_block_num: 0
spp: true
drop_block: true
YOLOv3Head:
anchors: [[11, 18], [34, 47], [51, 126],
[115, 71], [120, 195], [254, 235]]
anchor_masks: [[3, 4, 5], [0, 1, 2]]
loss: YOLOv3Loss
YOLOv3Loss:
ignore_thresh: 0.5
downsample: [32, 16]
label_smooth: false
scale_x_y: 1.05
iou_loss: IouLoss
IouLoss:
loss_weight: 2.5
loss_square: true
BBoxPostProcess:
decode:
name: YOLOBox
conf_thresh: 0.005
downsample_ratio: 32
clip_bbox: true
scale_x_y: 1.05
nms:
name: MultiClassNMS
keep_top_k: 100
nms_threshold: 0.45
nms_top_k: 1000
score_threshold: 0.005
| PaddleDetection/configs/ppyolo/_base_/ppyolo_mbv3_large.yml/0 | {
"file_path": "PaddleDetection/configs/ppyolo/_base_/ppyolo_mbv3_large.yml",
"repo_id": "PaddleDetection",
"token_count": 529
} | 25 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'./_base_/ppyolo_r50vd_dcn.yml',
'./_base_/optimizer_1x.yml',
'./_base_/ppyolo_reader.yml',
]
snapshot_epoch: 16
EvalDataset:
!COCODataSet
image_dir: test2017
anno_path: annotations/image_info_test-dev2017.json
dataset_dir: dataset/coco
| PaddleDetection/configs/ppyolo/ppyolo_test.yml/0 | {
"file_path": "PaddleDetection/configs/ppyolo/ppyolo_test.yml",
"repo_id": "PaddleDetection",
"token_count": 157
} | 26 |
_BASE_: [
'./_base_/pcb_detection.yml',
'../../runtime.yml',
'../_base_/optimizer_80e.yml',
'../_base_/ppyoloe_plus_crn.yml',
'../_base_/ppyoloe_plus_reader.yml',
]
log_iter: 100
snapshot_epoch: 5
weights: output/ppyoloe_plus_crn_m_80e_obj365_pretrained_pcb/model_final
pretrain_weights: https://bj.bcebos.com/v1/paddledet/models/pretrained/ppyoloe_crn_m_obj365_pretrained.pdparams
depth_mult: 0.67
width_mult: 0.75
| PaddleDetection/configs/ppyoloe/application/ppyoloe_plus_crn_m_80e_obj365_pretrained_pcb.yml/0 | {
"file_path": "PaddleDetection/configs/ppyoloe/application/ppyoloe_plus_crn_m_80e_obj365_pretrained_pcb.yml",
"repo_id": "PaddleDetection",
"token_count": 201
} | 27 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'./_base_/optimizer_300e.yml',
'./_base_/ppyoloe_crn.yml',
'./_base_/ppyoloe_reader.yml',
]
log_iter: 100
snapshot_epoch: 10
weights: output/ppyoloe_crn_m_300e_coco/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/CSPResNetb_m_pretrained.pdparams
depth_mult: 0.67
width_mult: 0.75
| PaddleDetection/configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml/0 | {
"file_path": "PaddleDetection/configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 181
} | 28 |
Simplified Chinese | [English](README_en.md)
# PP-YOLOE-R
## Contents
- [Introduction](#introduction)
- [Model Zoo](#model-zoo)
- [Getting Started](#getting-started)
- [Deployment](#deployment)
- [Appendix](#appendix)
- [Citations](#citations)
## Introduction
PP-YOLOE-R is an efficient anchor-free rotated object detector. Based on PP-YOLOE, PP-YOLOE-R introduces a bag of useful designs to improve detection accuracy at the cost of only marginal parameters and computation. On the DOTA 1.0 dataset, PP-YOLOE-R-l and PP-YOLOE-R-x achieve 78.14 and 78.27 mAP respectively with single-scale training and testing, which outperforms almost all other rotated object detectors. With multi-scale training and testing, the accuracy of PP-YOLOE-R-l and PP-YOLOE-R-x is further improved to 80.02 and 80.73 mAP. In this case, PP-YOLOE-R-x surpasses all anchor-free methods and is on par with the state-of-the-art anchor-based two-stage models. Moreover, PP-YOLOE-R-s and PP-YOLOE-R-m reach 79.42 and 79.71 mAP with multi-scale training and testing, which is remarkable considering their parameters and computation. While keeping high accuracy, PP-YOLOE-R avoids special operators such as Deformable Convolution or Rotated RoI Align, so that it can be easily deployed on a wide variety of hardware. At an input resolution of 1024x1024, PP-YOLOE-R-s/m/l/x reach 69.8/55.1/48.3/37.1 FPS on an RTX 2080 Ti and 114.5/86.8/69.7/50.7 FPS on a Tesla V100 with TensorRT FP16. For more details, please refer to our [technical report](https://arxiv.org/abs/2211.02386).
<div align="center">
    <img src="../../../docs/images/ppyoloe_r_map_fps.png" width=500 />
</div>
Compared with PP-YOLOE, PP-YOLOE-R makes the following changes:
- Rotated Task Alignment Learning
- Decoupled angle prediction head
- Angle prediction with DFL
- Learnable gating unit
- [ProbIoU loss](https://arxiv.org/abs/2106.06072)
## Model Zoo
| Model | Backbone | mAP | V100 TRT FP16 (FPS) | RTX 2080 Ti TRT FP16 (FPS) | Params (M) | FLOPs (G) | Lr Scheduler | Angle | Aug | GPUs | Images/GPU | Download | Config |
|:---:|:--------:|:----:|:--------------------:|:------------------------:|:----------:|:---------:|:--------:|:----------:|:-------:|:------:|:-----------:|:--------:|:------:|
| PP-YOLOE-R-s | CRN-s | 73.82 | 114.5 | 69.8 | 8.09 | 43.46 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_s_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_dota.yml) |
| PP-YOLOE-R-s | CRN-s | 79.42 | 114.5 | 69.8 | 8.09 | 43.46 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_s_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_dota_ms.yml) |
| PP-YOLOE-R-m | CRN-m | 77.64 | 86.8 | 55.1 | 23.96 |127.00 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_m_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_m_3x_dota.yml) |
| PP-YOLOE-R-m | CRN-m | 79.71 | 86.8 | 55.1 | 23.96 |127.00 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_m_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_m_3x_dota_ms.yml) |
| PP-YOLOE-R-l | CRN-l | 78.14 | 69.7 | 48.3 | 53.29 |281.65 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml) |
| PP-YOLOE-R-l | CRN-l | 80.02 | 69.7 | 48.3 | 53.29 |281.65 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota_ms.yml) |
| PP-YOLOE-R-x | CRN-x | 78.28 | 50.7 | 37.1 | 100.27|529.82 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_x_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_x_3x_dota.yml) |
| PP-YOLOE-R-x | CRN-x | 80.73 | 50.7 | 37.1 | 100.27|529.82 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_x_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_x_3x_dota_ms.yml) |
**Notes:**
- If the **number of GPUs** or **batch size** changes, you need to adjust the learning rate according to the formula **lr<sub>new</sub> = lr<sub>default</sub> * (batch_size<sub>new</sub> * GPU_number<sub>new</sub>) / (batch_size<sub>default</sub> * GPU_number<sub>default</sub>)**.
- Models in the model zoo are trained and tested with single scale by default. If the data augmentation column is marked with MS, multi-scale training and testing are used. If it is marked with RR, RandomRotate data augmentation is used during training.
- CRN denotes CSPRepResNet proposed in PP-YOLOE.
- The parameters and FLOPs of PP-YOLOE-R are calculated after re-parameterization, with an input resolution of 1024x1024.
- Speed is tested with TensorRT 8.2.3 by averaging over 2000 images of the DOTA test set. Refer to [Speed Testing](#speed-testing) to reproduce the results.
## Getting Started
Refer to [Data Preparation](../README.md#数据准备) to prepare the data.
### Training
Training with a single GPU
``` bash
CUDA_VISIBLE_DEVICES=0 python tools/train.py -c configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml
```
Training with multiple GPUs
``` bash
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml
```
### Inference
Run the following command to predict a single image. The visualized results are saved in the `output` folder by default.
``` bash
python tools/infer.py -c configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams --infer_img=demo/P0861__1.0__1154___824.png --draw_threshold=0.5
```
### Evaluation on DOTA Dataset
Refer to [DOTA Task](https://captain-whu.github.io/DOTA/tasks.html). To evaluate on the DOTA dataset, you need to generate a zip file that contains all the detection results: the results of each category are stored in one txt file, and each line of the txt file has the format `image_name score x1 y1 x2 y2 x3 y3 x4 y4`. Submit the generated zip file to Task 1 of [DOTA Evaluation](https://captain-whu.github.io/DOTA/evaluation.html). You can run the following command to get the prediction results on the test set:
``` bash
python tools/infer.py -c configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams --infer_dir=/path/to/test/images --output_dir=output_ppyoloe_r --visualize=False --save_results=True
```
Process the prediction results into the format required by the official evaluation server:
``` bash
python configs/rotate/tools/generate_result.py --pred_txt_dir=output_ppyoloe_r/ --output_dir=submit/ --data_type=dota10
zip -r submit.zip submit
```
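For reference, each per-class txt inside the submitted zip (typically named `Task1_<class>.txt`) contains one detection per line in the format described above; the following purely illustrative Python sketch shows such a line (all values are made up):
```python
# Illustrative only: one line of a DOTA Task 1 per-class result file.
det = {
    'image_name': 'P0006',  # hypothetical image id
    'score': 0.87,
    'poly': [331.2, 204.5, 389.9, 204.8, 389.6, 262.3, 330.9, 262.0],  # 4 corners
}
line = '{} {:.4f} '.format(det['image_name'], det['score']) + \
       ' '.join('{:.1f}'.format(v) for v in det['poly'])
print(line)  # -> P0006 0.8700 331.2 204.5 389.9 204.8 389.6 262.3 330.9 262.0
```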
### Speed Testing
You can benchmark in Paddle mode or Paddle-TRT mode. When benchmarking in Paddle-TRT mode, make sure that **the TensorRT version is higher than 8.2 and PaddlePaddle is the develop version**. To benchmark with Paddle-TRT, run the following commands:
``` bash
# Export the model
python tools/export_model.py -c configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams trt=True
# Speed test
CUDA_VISIBLE_DEVICES=0 python configs/rotate/tools/inference_benchmark.py --model_dir output_inference/ppyoloe_r_crn_l_3x_dota/ --image_dir /path/to/dota/test/dir --run_mode trt_fp16
```
To benchmark with Paddle only, run the following commands:
``` bash
# Export the model
python tools/export_model.py -c configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams
# Speed test
CUDA_VISIBLE_DEVICES=0 python configs/rotate/tools/inference_benchmark.py --model_dir output_inference/ppyoloe_r_crn_l_3x_dota/ --image_dir /path/to/dota/test/dir --run_mode paddle
```
## Deployment
To deploy **with Paddle**, run the following commands:
``` bash
# Export the model
python tools/export_model.py -c configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams
# Predict an image
python deploy/python/infer.py --image_file demo/P0072__1.0__0___0.png --model_dir=output_inference/ppyoloe_r_crn_l_3x_dota --run_mode=paddle --device=gpu
```
To deploy **with Paddle-TRT**, run the following commands:
```
# Export the model
python tools/export_model.py -c configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams trt=True
# Predict an image
python deploy/python/infer.py --image_file demo/P0072__1.0__0___0.png --model_dir=output_inference/ppyoloe_r_crn_l_3x_dota --run_mode=trt_fp16 --device=gpu
```
**Notes:**
- To use Paddle-TRT, make sure that **PaddlePaddle is the develop version and the TensorRT version is higher than 8.2**.
To deploy **with ONNX Runtime**, run the following commands:
```
# Export the model
python tools/export_model.py -c configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams export_onnx=True
# Install paddle2onnx
pip install paddle2onnx
# Convert to ONNX model
paddle2onnx --model_dir output_inference/ppyoloe_r_crn_l_3x_dota --model_filename model.pdmodel --params_filename model.pdiparams --opset_version 11 --save_file ppyoloe_r_crn_l_3x_dota.onnx
# Predict an image
python configs/rotate/tools/onnx_infer.py --infer_cfg output_inference/ppyoloe_r_crn_l_3x_dota/infer_cfg.yml --onnx_file ppyoloe_r_crn_l_3x_dota.onnx --image_file demo/P0072__1.0__0___0.png
```
## Appendix
Ablation experiments of PP-YOLOE-R:
| Model | mAP | Params (M) | FLOPs (G) |
| :-: | :-: | :------: | :------: |
| Baseline | 75.61 | 50.65 | 269.09 |
| +Rotated Task Alignment Learning | 77.24 | 50.65 | 269.09 |
| +Decoupled Angle Prediction Head | 77.78 | 52.20 | 272.72 |
| +Angle Prediction with DFL | 78.01 | 53.29 | 281.65 |
| +Learnable Gating Unit for RepVGG | 78.14 | 53.29 | 281.65 |
## Citations
```
@article{wang2022pp,
title={PP-YOLOE-R: An Efficient Anchor-Free Rotated Object Detector},
author={Wang, Xinxin and Wang, Guanzhong and Dang, Qingqing and Liu, Yi and Hu, Xiaoguang and Yu, Dianhai},
journal={arXiv preprint arXiv:2211.02386},
year={2022}
}
@article{xu2022pp,
title={PP-YOLOE: An evolved version of YOLO},
author={Xu, Shangliang and Wang, Xinxin and Lv, Wenyu and Chang, Qinyao and Cui, Cheng and Deng, Kaipeng and Wang, Guanzhong and Dang, Qingqing and Wei, Shengyu and Du, Yuning and others},
journal={arXiv preprint arXiv:2203.16250},
year={2022}
}
@article{llerena2021gaussian,
title={Gaussian Bounding Boxes and Probabilistic Intersection-over-Union for Object Detection},
author={Llerena, Jeffri M and Zeni, Luis Felipe and Kristen, Lucas N and Jung, Claudio},
journal={arXiv preprint arXiv:2106.06072},
year={2021}
}
```
| PaddleDetection/configs/rotate/ppyoloe_r/README.md/0 | {
"file_path": "PaddleDetection/configs/rotate/ppyoloe_r/README.md",
"repo_id": "PaddleDetection",
"token_count": 6041
} | 29 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'_base_/optimizer_6x.yml',
'_base_/rtdetr_r50vd.yml',
'_base_/rtdetr_reader.yml',
]
weights: output/rtdetr_focalnet_L_384_3x_coco/model_final
find_unused_parameters: True
log_iter: 100
snapshot_epoch: 2
pretrain_weights: https://bj.bcebos.com/v1/paddledet/models/pretrained/focalnet_large_fl4_pretrained_on_o365.pdparams
DETR:
backbone: FocalNet
neck: HybridEncoder
transformer: RTDETRTransformer
detr_head: DINOHead
post_process: DETRPostProcess
FocalNet:
arch: 'focalnet_L_384_22k_fl4'
out_indices: [1, 2, 3]
HybridEncoder:
hidden_dim: 256
use_encoder_idx: [2]
num_encoder_layers: 6 #
encoder_layer:
name: TransformerLayer
d_model: 256
nhead: 8
dim_feedforward: 2048
dropout: 0.
activation: 'gelu'
expansion: 1.0
RTDETRTransformer:
num_queries: 300
position_embed_type: sine
feat_strides: [8, 16, 32]
num_levels: 3
nhead: 8
num_decoder_layers: 6
dim_feedforward: 2048 #
dropout: 0.0
activation: relu
num_denoising: 100
label_noise_ratio: 0.5
box_noise_scale: 1.0
learnt_init_query: False
query_pos_head_inv_sig: True #
DINOHead:
loss:
name: DINOLoss
loss_coeff: {class: 1, bbox: 5, giou: 2}
aux_loss: True
use_vfl: True
matcher:
name: HungarianMatcher
matcher_coeff: {class: 2, bbox: 5, giou: 2}
DETRPostProcess:
num_top_queries: 300
epoch: 36
LearningRate:
base_lr: 0.0001
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [36]
use_warmup: false
OptimizerBuilder:
clip_grad_by_norm: 0.1
regularizer: false
optimizer:
type: AdamW
weight_decay: 0.0001
param_groups:
- params: ['absolute_pos_embed', 'relative_position_bias_table', 'norm']
weight_decay: 0.0
| PaddleDetection/configs/rtdetr/rtdetr_focalnet_L_384_3x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/rtdetr/rtdetr_focalnet_L_384_3x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 823
} | 30 |
# convert VOC xml to COCO format json
import xml.etree.ElementTree as ET
import os
import json
import argparse
# create and init coco json, img set, and class set
def init_json():
# create coco json
coco = dict()
coco['images'] = []
coco['type'] = 'instances'
coco['annotations'] = []
coco['categories'] = []
# voc classes
voc_class = [
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]
# init json categories
image_set = set()
class_set = dict()
for cat_id, cat_name in enumerate(voc_class):
cat_item = dict()
cat_item['supercategory'] = 'none'
cat_item['id'] = cat_id
cat_item['name'] = cat_name
coco['categories'].append(cat_item)
class_set[cat_name] = cat_id
return coco, class_set, image_set
def getImgItem(file_name, size, img_id):
if file_name is None:
raise Exception('Could not find filename tag in xml file.')
if size['width'] is None:
raise Exception('Could not find width tag in xml file.')
if size['height'] is None:
raise Exception('Could not find height tag in xml file.')
image_item = dict()
image_item['id'] = img_id
image_item['file_name'] = file_name
image_item['width'] = size['width']
image_item['height'] = size['height']
return image_item
def getAnnoItem(object_name, image_id, ann_id, category_id, bbox):
annotation_item = dict()
annotation_item['segmentation'] = []
seg = []
# bbox[] is x,y,w,h
# left_top
seg.append(bbox[0])
seg.append(bbox[1])
# left_bottom
seg.append(bbox[0])
seg.append(bbox[1] + bbox[3])
# right_bottom
seg.append(bbox[0] + bbox[2])
seg.append(bbox[1] + bbox[3])
# right_top
seg.append(bbox[0] + bbox[2])
seg.append(bbox[1])
annotation_item['segmentation'].append(seg)
annotation_item['area'] = bbox[2] * bbox[3]
annotation_item['iscrowd'] = 0
annotation_item['ignore'] = 0
annotation_item['image_id'] = image_id
annotation_item['bbox'] = bbox
annotation_item['category_id'] = category_id
annotation_item['id'] = ann_id
return annotation_item
def convert_voc_to_coco(txt_path, json_path, xml_path):
# create and init coco json, img set, and class set
coco_json, class_set, image_set = init_json()
### collect img and ann info into coco json
# read img_name in txt, e.g., 000005 for voc2007, 2008_000002 for voc2012
img_txt = open(txt_path, 'r')
img_line = img_txt.readline().strip()
# loop xml
img_id = 0
ann_id = 0
while img_line:
print('img_id:', img_id)
# find corresponding xml
xml_name = img_line.split('Annotations/', 1)[1]
xml_file = os.path.join(xml_path, xml_name)
if not os.path.exists(xml_file):
            print('{} does not exist.'.format(xml_name))
img_line = img_txt.readline().strip()
continue
# decode xml
tree = ET.parse(xml_file)
root = tree.getroot()
if root.tag != 'annotation':
raise Exception(
'xml {} root element should be annotation, rather than {}'.
format(xml_name, root.tag))
# init img and ann info
bndbox = dict()
size = dict()
size['width'] = None
size['height'] = None
size['depth'] = None
# filename
fileNameNode = root.find('filename')
file_name = fileNameNode.text
# img size
sizeNode = root.find('size')
if not sizeNode:
raise Exception('xml {} structure broken at size tag.'.format(
xml_name))
for subNode in sizeNode:
size[subNode.tag] = int(subNode.text)
# add img into json
if file_name not in image_set:
img_id += 1
format_img_id = int("%04d" % img_id)
# print('line 120. format_img_id:', format_img_id)
image_item = getImgItem(file_name, size, img_id)
image_set.add(file_name)
coco_json['images'].append(image_item)
else:
raise Exception(' xml {} duplicated image: {}'.format(xml_name,
file_name))
### add objAnn into json
objectAnns = root.findall('object')
for objectAnn in objectAnns:
bndbox['xmin'] = None
bndbox['xmax'] = None
bndbox['ymin'] = None
bndbox['ymax'] = None
#add obj category
object_name = objectAnn.find('name').text
if object_name not in class_set:
raise Exception('xml {} Unrecognized category: {}'.format(
xml_name, object_name))
else:
current_category_id = class_set[object_name]
#add obj bbox ann
objectBboxNode = objectAnn.find('bndbox')
for coordinate in objectBboxNode:
if bndbox[coordinate.tag] is not None:
raise Exception('xml {} structure corrupted at bndbox tag.'.
format(xml_name))
bndbox[coordinate.tag] = int(float(coordinate.text))
bbox = []
# x
bbox.append(bndbox['xmin'])
# y
bbox.append(bndbox['ymin'])
# w
bbox.append(bndbox['xmax'] - bndbox['xmin'])
# h
bbox.append(bndbox['ymax'] - bndbox['ymin'])
ann_id += 1
ann_item = getAnnoItem(object_name, img_id, ann_id,
current_category_id, bbox)
coco_json['annotations'].append(ann_item)
img_line = img_txt.readline().strip()
print('Saving json.')
json.dump(coco_json, open(json_path, 'w'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--type', type=str, default='VOC2007_test', help="data type")
parser.add_argument(
'--base_path',
type=str,
default='dataset/voc/VOCdevkit',
help="base VOC path.")
args = parser.parse_args()
# image info path
txt_name = args.type + '.txt'
json_name = args.type + '.json'
txt_path = os.path.join(args.base_path, 'PseudoAnnotations', txt_name)
json_path = os.path.join(args.base_path, 'PseudoAnnotations', json_name)
# xml path
xml_path = os.path.join(args.base_path,
args.type.split('_')[0], 'Annotations')
print('txt_path:', txt_path)
print('json_path:', json_path)
print('xml_path:', xml_path)
print('Converting {} to COCO json.'.format(args.type))
convert_voc_to_coco(txt_path, json_path, xml_path)
print('Finished.')
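# Example usage (a sketch; adjust paths to your own layout):
#   python voc2coco.py --type VOC2007_test --base_path dataset/voc/VOCdevkit
# This expects <base_path>/PseudoAnnotations/VOC2007_test.txt, where each line
# contains an xml path with an 'Annotations/' segment (e.g. .../Annotations/000005.xml),
# and writes <base_path>/PseudoAnnotations/VOC2007_test.json in COCO format.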
| PaddleDetection/configs/semi_det/_base_/voc2coco.py/0 | {
"file_path": "PaddleDetection/configs/semi_det/_base_/voc2coco.py",
"repo_id": "PaddleDetection",
"token_count": 3302
} | 31 |
_BASE_: [
'../../ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml',
]
log_iter: 50
snapshot_epoch: 5
weights: output/ppyoloe_plus_crn_l_80e_coco_sup005/model_final
pretrain_weights: https://bj.bcebos.com/v1/paddledet/models/pretrained/ppyoloe_crn_l_obj365_pretrained.pdparams
depth_mult: 1.0
width_mult: 1.0
TrainDataset:
!COCODataSet
image_dir: train2017
anno_path: semi_annotations/instances_train2017.1@5.json
dataset_dir: dataset/coco
data_fields: ['image', 'gt_bbox', 'gt_class']
epoch: 80
LearningRate:
base_lr: 0.001
schedulers:
- !CosineDecay
max_epochs: 96
- !LinearWarmup
start_factor: 0.
epochs: 5
| PaddleDetection/configs/semi_det/baseline/ppyoloe_plus_crn_l_80e_coco_sup005.yml/0 | {
"file_path": "PaddleDetection/configs/semi_det/baseline/ppyoloe_plus_crn_l_80e_coco_sup005.yml",
"repo_id": "PaddleDetection",
"token_count": 308
} | 32 |
pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams
slim: Pruner
Pruner:
criterion: fpgm
pruned_params: ['conv2d_62.w_0', 'conv2d_63.w_0', 'conv2d_64.w_0',
'conv2d_65.w_0', 'conv2d_66.w_0', 'conv2d_67.w_0']
pruned_ratios: [0.75, 0.75, 0.75, 0.75, 0.75, 0.75]
print_params: True
| PaddleDetection/configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml/0 | {
"file_path": "PaddleDetection/configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml",
"repo_id": "PaddleDetection",
"token_count": 187
} | 33 |
# Weights of yolov3_mobilenet_v3_coco
pretrain_weights: https://paddledet.bj.bcebos.com/models/yolov3_mobilenet_v3_large_270e_coco.pdparams
slim: QAT
QAT:
quant_config: {
'activation_preprocess_type': 'PACT',
'weight_quantize_type': 'channel_wise_abs_max', 'activation_quantize_type': 'moving_average_abs_max',
'weight_bits': 8, 'activation_bits': 8, 'dtype': 'int8', 'window_size': 10000, 'moving_rate': 0.9,
'quantizable_layer_type': ['Conv2D', 'Linear']}
print_model: True
epoch: 50
LearningRate:
base_lr: 0.0001
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones:
- 35
- 45
- !LinearWarmup
start_factor: 0.
steps: 1000
| PaddleDetection/configs/slim/quant/yolov3_mobilenet_v3_qat.yml/0 | {
"file_path": "PaddleDetection/configs/slim/quant/yolov3_mobilenet_v3_qat.yml",
"repo_id": "PaddleDetection",
"token_count": 297
} | 34 |
weights: output/picodet_l_1024_coco_lcnet_lvjian1/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams
worker_num: 2
eval_height: &eval_height 1024
eval_width: &eval_width 1024
eval_size: &eval_size [*eval_height, *eval_width]
metric: COCO
num_classes: 5
TrainDataset:
!COCODataSet
image_dir: images
anno_path: train.json
dataset_dir: dataset/slice_lvjian1_data/train
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
EvalDataset:
!COCODataSet
image_dir: images
anno_path: val.json
dataset_dir: dataset/slice_lvjian1_data/eval
TestDataset:
!ImageFolder
anno_path: val.json
dataset_dir: dataset/slice_lvjian1_data/eval
epoch: 50
LearningRate:
base_lr: 0.006
schedulers:
- !CosineDecay
max_epochs: 50
- !LinearWarmup
start_factor: 0.001
steps: 300
TrainReader:
sample_transforms:
- Decode: {}
- RandomCrop: {}
- RandomFlip: {prob: 0.5}
- RandomDistort: {}
batch_transforms:
- BatchRandomResize: {target_size: [960, 992, 1024, 1056, 1088], random_size: True, random_interp: True, keep_ratio: False}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
- PadGT: {}
batch_size: 8
shuffle: true
drop_last: true
EvalReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
batch_size: 8
shuffle: false
TestReader:
inputs_def:
image_shape: [1, 3, *eval_height, *eval_width]
sample_transforms:
- Decode: {}
- Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_size: 1
use_gpu: true
use_xpu: false
log_iter: 100
save_dir: output
snapshot_epoch: 10
print_flops: false
find_unused_parameters: True
use_ema: true
# Exporting the model
export:
post_process: True # Whether post-processing is included in the network when export model.
nms: True # Whether NMS is included in the network when export model.
benchmark: False # It is used to testing model performance, if set `True`, post-process and NMS will not be exported.
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.00004
type: L2
architecture: PicoDet
PicoDet:
backbone: LCNet
neck: LCPAN
head: PicoHeadV2
LCNet:
scale: 2.0
feature_maps: [3, 4, 5]
LCPAN:
out_channels: 160
use_depthwise: True
num_features: 4
PicoHeadV2:
conv_feat:
name: PicoFeat
feat_in: 160
feat_out: 160
num_convs: 4
num_fpn_stride: 4
norm_type: bn
share_cls_reg: True
use_se: True
fpn_stride: [8, 16, 32, 64]
feat_in_chan: 160
prior_prob: 0.01
reg_max: 7
cell_offset: 0.5
grid_cell_scale: 5.0
static_assigner_epoch: 100
use_align_head: True
static_assigner:
name: ATSSAssigner
topk: 9
force_gt_matching: False
assigner:
name: TaskAlignedAssigner
topk: 13
alpha: 1.0
beta: 6.0
loss_class:
name: VarifocalLoss
use_sigmoid: False
iou_weighted: True
loss_weight: 1.0
loss_dfl:
name: DistributionFocalLoss
loss_weight: 0.5
loss_bbox:
name: GIoULoss
loss_weight: 2.5
nms:
name: MultiClassNMS
nms_top_k: 1000
keep_top_k: 100
score_threshold: 0.025
nms_threshold: 0.6
| PaddleDetection/configs/smrt/picodet/picodet_l_1024_coco_lcnet_lvjian1.yml/0 | {
"file_path": "PaddleDetection/configs/smrt/picodet/picodet_l_1024_coco_lcnet_lvjian1.yml",
"repo_id": "PaddleDetection",
"token_count": 1563
} | 35 |
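The `&eval_height`/`*eval_height` pairs in the config above are standard YAML anchors and aliases, so the evaluation size is written once and reused by every reader. A small PyYAML check of just those lines (the custom `!COCODataSet`-style tags elsewhere in the file need PaddleDetection's own loader and are omitted here):

```python
import yaml  # PyYAML

snippet = """
eval_height: &eval_height 1024
eval_width: &eval_width 1024
eval_size: &eval_size [*eval_height, *eval_width]
TestReader:
  inputs_def:
    image_shape: [1, 3, *eval_height, *eval_width]
"""
cfg = yaml.safe_load(snippet)
print(cfg["eval_size"])                                 # [1024, 1024]
print(cfg["TestReader"]["inputs_def"]["image_shape"])   # [1, 3, 1024, 1024]
```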
_BASE_: [
'../datasets/sniper_visdrone_detection.yml',
'../runtime.yml',
'../faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',
'../faster_rcnn/_base_/optimizer_1x.yml',
'_base_/faster_fpn_reader.yml',
]
weights: output/faster_rcnn_r50_fpn_1x_sniper_visdrone/model_final
find_unused_parameters: true
| PaddleDetection/configs/sniper/faster_rcnn_r50_fpn_1x_sniper_visdrone.yml/0 | {
"file_path": "PaddleDetection/configs/sniper/faster_rcnn_r50_fpn_1x_sniper_visdrone.yml",
"repo_id": "PaddleDetection",
"token_count": 147
} | 36 |
worker_num: 4
TrainReader:
sample_transforms:
- Decode: {}
- RandomResize: {target_size: [[480, 1333], [512, 1333], [544, 1333], [576, 1333], [608, 1333], [640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], keep_ratio: true, interp: 1}
- RandomFlip: {prob: 0.5}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
- Gt2SparseTarget: {use_padding_shape: True}
batch_size: 4
shuffle: true
drop_last: true
collate_batch: false
EvalReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 1, target_size: [800, 1333], keep_ratio: True}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
- Gt2SparseTarget: {use_padding_shape: True}
batch_size: 1
shuffle: false
drop_last: false
TestReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 1, target_size: [800, 1333], keep_ratio: True}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
- Gt2SparseTarget: {use_padding_shape: True}
batch_size: 1
shuffle: false
| PaddleDetection/configs/sparse_rcnn/_base_/sparse_rcnn_reader.yml/0 | {
"file_path": "PaddleDetection/configs/sparse_rcnn/_base_/sparse_rcnn_reader.yml",
"repo_id": "PaddleDetection",
"token_count": 571
} | 37 |
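`PadBatch: {pad_to_stride: 32}` in the reader above zero-pads every image in a batch to a shared height and width that are multiples of the stride, which FPN-style necks require. A minimal sketch of that padding rule (simplified; the real transform also pads the associated ground-truth targets):

```python
import math
import numpy as np

def pad_batch(images, pad_to_stride=32):
    """Zero-pad a list of CHW images to a common shape whose height and
    width are multiples of `pad_to_stride`."""
    max_h = max(im.shape[1] for im in images)
    max_w = max(im.shape[2] for im in images)
    max_h = int(math.ceil(max_h / pad_to_stride) * pad_to_stride)
    max_w = int(math.ceil(max_w / pad_to_stride) * pad_to_stride)
    out = np.zeros((len(images), images[0].shape[0], max_h, max_w), dtype=images[0].dtype)
    for i, im in enumerate(images):
        out[i, :, :im.shape[1], :im.shape[2]] = im
    return out

batch = pad_batch([np.ones((3, 800, 1205), "float32"), np.ones((3, 750, 1333), "float32")])
print(batch.shape)   # (2, 3, 800, 1344)
```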
worker_num: 3
TrainReader:
inputs_def:
num_max_boxes: 90
sample_transforms:
- Decode: {}
- RandomCrop: {num_attempts: 1}
- RandomFlip: {}
- Resize: {target_size: [300, 300], keep_ratio: False, interp: 1}
- RandomDistort: {brightness: [0.875, 1.125, 0.5], random_apply: False}
- NormalizeBox: {}
- PadBox: {num_max_boxes: 90}
- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: true}
- Permute: {}
batch_size: 64
shuffle: true
drop_last: true
use_shared_memory: true
EvalReader:
sample_transforms:
- Decode: {}
- Resize: {target_size: [300, 300], keep_ratio: False, interp: 1}
- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: true}
- Permute: {}
batch_size: 1
TestReader:
inputs_def:
image_shape: [3, 300, 300]
sample_transforms:
- Decode: {}
- Resize: {target_size: [300, 300], keep_ratio: False, interp: 1}
- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: true}
- Permute: {}
batch_size: 1
| PaddleDetection/configs/ssd/_base_/ssd_r34_reader.yml/0 | {
"file_path": "PaddleDetection/configs/ssd/_base_/ssd_r34_reader.yml",
"repo_id": "PaddleDetection",
"token_count": 494
} | 38 |
# Swin Transformer
## COCO Model Zoo
| 骨架网络 | 网络类型 | 每张GPU图片个数 | 学习率策略 |推理时间(fps) | mAP<sup>val<br>0.5:0.95 | 下载 | 配置文件 |
| :------------------- | :------------- | :-----: | :-----: | :------------: | :-----: | :-----------------------------------------------------: | :-----: |
| swin_T_224 | Faster R-CNN | 2 | 36e | ---- | 45.3 | [下载链接](https://paddledet.bj.bcebos.com/models/faster_rcnn_swin_tiny_fpn_3x_coco.pdparams) | [配置文件](./faster_rcnn_swin_tiny_fpn_3x_coco.yml) |
| swin_T_224 | PP-YOLOE+ | 8 | 36e | ---- | 44.7 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_plus_swin_tiny_36e_coco.pdparams) | [配置文件](./ppyoloe_plus_swin_tiny_36e_coco.yml) |
## Citations
```
@article{liu2021Swin,
title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows},
author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},
journal={arXiv preprint arXiv:2103.14030},
year={2021}
}
@inproceedings{liu2021swinv2,
title={Swin Transformer V2: Scaling Up Capacity and Resolution},
author={Ze Liu and Han Hu and Yutong Lin and Zhuliang Yao and Zhenda Xie and Yixuan Wei and Jia Ning and Yue Cao and Zheng Zhang and Li Dong and Furu Wei and Baining Guo},
booktitle={International Conference on Computer Vision and Pattern Recognition (CVPR)},
year={2022}
}
```
| PaddleDetection/configs/swin/README.md/0 | {
"file_path": "PaddleDetection/configs/swin/README.md",
"repo_id": "PaddleDetection",
"token_count": 738
} | 39 |
epoch: 300
LearningRate:
base_lr: 0.01
schedulers:
- !CosineDecay
max_epochs: 300
min_lr_ratio: 0.05
last_plateau_epochs: 15
- !ExpWarmup
epochs: 5
OptimizerBuilder:
optimizer:
type: Momentum
momentum: 0.9
use_nesterov: True
regularizer:
factor: 0.0005
type: L2
| PaddleDetection/configs/yolox/_base_/optimizer_300e.yml/0 | {
"file_path": "PaddleDetection/configs/yolox/_base_/optimizer_300e.yml",
"repo_id": "PaddleDetection",
"token_count": 151
} | 40 |
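A sketch of the learning-rate curve the schedule above produces: exponential-style warmup for 5 epochs, cosine decay toward `base_lr * min_lr_ratio`, and a flat plateau for the last 15 epochs. The exact warmup power and plateau handling are assumptions about PaddleDetection's `ExpWarmup`/`CosineDecay`, so this only shows the approximate shape:

```python
import math

def yolox_lr(epoch, base_lr=0.01, max_epochs=300, min_lr_ratio=0.05,
             last_plateau_epochs=15, warmup_epochs=5):
    """Approximate LR at a given epoch for the schedule above (assumed
    quadratic warmup; plateau semantics are an assumption, not copied from
    PaddleDetection source)."""
    min_lr = base_lr * min_lr_ratio
    if epoch < warmup_epochs:
        return base_lr * (epoch / warmup_epochs) ** 2
    if epoch >= max_epochs - last_plateau_epochs:
        return min_lr
    t = (epoch - warmup_epochs) / (max_epochs - warmup_epochs - last_plateau_epochs)
    return min_lr + 0.5 * (base_lr - min_lr) * (1 + math.cos(math.pi * t))

print([round(yolox_lr(e), 5) for e in (0, 5, 150, 290)])
```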
Global:
reader_config: configs/yolov5_reader.yml
include_nms: True
Evaluation: True
model_dir: ./yolov6mt_s_400e_coco
model_filename: model.pdmodel
params_filename: model.pdiparams
Distillation:
alpha: 1.0
loss: soft_label
QuantAware:
activation_quantize_type: 'moving_average_abs_max'
quantize_op_types:
- conv2d
- depthwise_conv2d
TrainConfig:
train_iter: 8000
eval_iter: 1000
learning_rate:
type: CosineAnnealingDecay
learning_rate: 0.00003
T_max: 8000
optimizer_builder:
optimizer:
type: SGD
weight_decay: 0.00004
| PaddleDetection/deploy/auto_compression/configs/yolov6mt_s_qat_dis.yaml/0 | {
"file_path": "PaddleDetection/deploy/auto_compression/configs/yolov6mt_s_qat_dis.yaml",
"repo_id": "PaddleDetection",
"token_count": 241
} | 41 |
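The `Distillation` block above applies a `soft_label` loss with weight `alpha: 1.0` between teacher and student outputs. A generic numpy formulation of that loss is sketched below (temperature fixed at 1; this is the textbook soft-label cross-entropy, not PaddleSlim's exact implementation):

```python
import numpy as np

def soft_label_loss(student_logits, teacher_logits, temperature=1.0):
    """Cross-entropy between the teacher's softened distribution and the
    student's, i.e. a soft-label distillation loss."""
    def softmax(z):
        z = z - z.max(axis=-1, keepdims=True)
        e = np.exp(z / temperature)
        return e / e.sum(axis=-1, keepdims=True)
    p_t = softmax(teacher_logits)
    p_s = np.clip(softmax(student_logits), 1e-10, 1.0)
    return -(p_t * np.log(p_s)).sum(axis=-1).mean()

s = np.random.randn(4, 80)     # hypothetical student logits
t = np.random.randn(4, 80)     # hypothetical teacher logits
loss = 1.0 * soft_label_loss(s, t)   # alpha: 1.0 as in the config above
print(loss)
```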
# Visual Studio 2019 Community CMake 编译指南
Windows 平台下,我们使用`Visual Studio 2019 Community` 进行了测试。微软从`Visual Studio 2017`开始即支持直接管理`CMake`跨平台编译项目,但是直到`2019`才提供了稳定和完全的支持,所以如果你想使用CMake管理项目编译构建,我们推荐你使用`Visual Studio 2019`环境下构建。
## 前置条件
* Visual Studio 2019 (根据Paddle预测库所使用的VS版本选择,请参考 [Visual Studio 不同版本二进制兼容性](https://docs.microsoft.com/zh-cn/cpp/porting/binary-compat-2015-2017?view=vs-2019) )
* CUDA 9.0 / CUDA 10.0,cudnn 7+ / TensorRT(仅在使用GPU版本的预测库时需要)
* CMake 3.0+ [CMake下载](https://cmake.org/download/)
**特别注意:windows下预测库需要的TensorRT版本为:**
| 预测库版本 | TensorRT版本 |
| ---- | ---- |
| cuda10.1_cudnn7.6_avx_mkl_trt6 | TensorRT-6.0.1.5 |
| cuda10.2_cudnn7.6_avx_mkl_trt7 | TensorRT-7.0.0.11 |
| cuda11.0_cudnn8.0_avx_mkl_trt7 | TensorRT-7.2.1.6 |
请确保系统已经安装好上述基本软件,我们使用的是`VS2019`的社区版。
**下面所有示例以工作目录为 `D:\projects`演示**。
### Step1: 下载代码
下载源代码
```shell
git clone https://github.com/PaddlePaddle/PaddleDetection.git
```
**说明**:其中`C++`预测代码在`PaddleDetection/deploy/cpp` 目录,该目录不依赖任何`PaddleDetection`下其他目录。
### Step2: 下载PaddlePaddle C++ 预测库 paddle_inference
PaddlePaddle C++ 预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html#windows)
解压后`D:\projects\paddle_inference`目录包含内容为:
```
paddle_inference
├── paddle # paddle核心库和头文件
|
├── third_party # 第三方依赖库和头文件
|
└── version.txt # 版本和编译信息
```
### Step3: 安装配置OpenCV
1. 在OpenCV官网下载适用于Windows平台的3.4.6版本, [下载地址](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download)
2. 运行下载的可执行文件,将OpenCV解压至指定目录,如`D:\projects\opencv`
3. 配置环境变量,如下流程所示(如果使用全局绝对路径,可以不用设置环境变量)
- 我的电脑->属性->高级系统设置->环境变量
- 在系统变量中找到Path(如没有,自行创建),并双击编辑
- 新建,将opencv路径填入并保存,如`D:\projects\opencv\build\x64\vc14\bin`
### Step4: 编译
1. 进入到`cpp`文件夹
```
cd D:\projects\PaddleDetection\deploy\cpp
```
2. 使用CMake生成项目文件
编译参数的含义说明如下(带`*`表示仅在使用**GPU版本**预测库时指定, 其中CUDA库版本尽量对齐,**使用9.0、10.0版本,不使用9.2、10.1等版本CUDA库**):
| 参数名 | 含义 |
| ---- | ---- |
| *CUDA_LIB | CUDA的库路径 |
| *CUDNN_LIB | CUDNN的库路径 |
| OPENCV_DIR | OpenCV的安装路径 |
| PADDLE_DIR | Paddle预测库的路径 |
| PADDLE_LIB_NAME | Paddle 预测库名称 |
**注意:**
1. 如果编译环境为CPU,需要下载`CPU`版预测库,请把`WITH_GPU`的勾去掉
2. 如果使用的是`openblas`版本,请把`WITH_MKL`勾去掉
3. 如无需使用关键点模型可以把`WITH_KEYPOINT`勾去掉
4. Windows环境下,`PADDLE_LIB_NAME`需要设置为`paddle_inference`
执行如下命令项目文件:
```
cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=path_to_opencv -DWITH_KEYPOINT=ON
```
例如:
```
cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=D:\projects\packages\cuda10_0\lib\x64 -DCUDNN_LIB=D:\projects\packages\cuda10_0\lib\x64 -DPADDLE_DIR=D:\projects\packages\paddle_inference -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=D:\projects\packages\opencv3_4_6 -DWITH_KEYPOINT=ON
```
3. 编译
用`Visual Studio 16 2019`打开`cpp`文件夹下的`PaddleObjectDetector.sln`,将编译模式设置为`Release`,点击`生成`->`全部生成`
### Step5: 预测及可视化
上述`Visual Studio 2019`编译产出的可执行文件在`out\build\x64-Release`目录下,打开`cmd`,并切换到该目录:
```
cd D:\projects\PaddleDetection\deploy\cpp\out\build\x64-Release
```
可执行文件`main`即为样例的预测程序,其主要的命令行参数如下:
| 参数 | 说明 |
| ---- | ---- |
| --model_dir | 导出的检测预测模型所在路径 |
| --model_dir_keypoint | 导出的关键点预测模型所在路径,可选 |
| --image_file | 要预测的图片文件路径 |
| --image_dir | 要预测的图片文件夹路径 |
| --video_file | 要预测的视频文件路径 |
| --camera_id | 用来预测的摄像头ID,默认为-1(表示不使用摄像头预测),可选 |
| --device | 运行时的设备,可选择`CPU/GPU/XPU`,默认为`CPU`|
| --gpu_id | 指定进行推理的GPU device id(默认值为0)|
| --run_mode | 使用GPU时,默认为paddle, 可选(paddle/trt_fp32/trt_fp16/trt_int8)|
| --batch_size | 检测模型预测时的batch size,在指定`image_dir`时有效 |
| --batch_size_keypoint | 关键点模型预测时的batch size,默认为8 |
| --run_benchmark | 是否重复预测来进行benchmark测速 |
| --output_dir | 输出图片所在的文件夹, 默认为output |
| --use_mkldnn | CPU预测中是否开启MKLDNN加速 |
| --cpu_threads | 设置cpu线程数,默认为1 |
| --use_dark | 关键点模型输出预测是否使用DarkPose后处理,默认为true |
**注意**:
(1)优先级顺序:`camera_id` > `video_file` > `image_dir` > `image_file`。
(2)如果提示找不到`opencv_world346.dll`,把`D:\projects\packages\opencv3_4_6\build\x64\vc14\bin`文件夹下的`opencv_world346.dll`拷贝到`main.exe`文件夹下即可。
(3)--run_benchmark如果设置为True,则需要安装依赖`pip install pynvml psutil GPUtil`。
`样例一`:
```shell
#不使用`GPU`测试图片 `D:\\images\\test.jpeg`
.\main --model_dir=D:\\models\\yolov3_darknet --image_file=D:\\images\\test.jpeg
```
图片文件`可视化预测结果`会保存在当前目录下`output.jpg`文件中。
`样例二`:
```shell
#使用`GPU`测试视频 `D:\\videos\\test.mp4`
.\main --model_dir=D:\\models\\yolov3_darknet --video_file=D:\\videos\\test.mp4 --device=GPU
```
视频文件目前支持`.mp4`格式的预测,`可视化预测结果`会保存在当前目录下`output.mp4`文件中。
`样例三`:
```shell
#使用关键点模型与检测模型联合预测,使用 `GPU`预测
#检测模型检测到的人送入关键点模型进行关键点预测
.\main --model_dir=D:\\models\\yolov3_darknet --model_dir_keypoint=D:\\models\\hrnet_w32_256x192 --image_file=D:\\images\\test.jpeg --device=GPU
```
## 性能测试
Benchmark请查看[BENCHMARK_INFER](../../BENCHMARK_INFER.md)
| PaddleDetection/deploy/cpp/docs/windows_vs2019_build.md/0 | {
"file_path": "PaddleDetection/deploy/cpp/docs/windows_vs2019_build.md",
"repo_id": "PaddleDetection",
"token_count": 4319
} | 42 |
[English](README.md) | 简体中文
# PP-PicoDet + PP-TinyPose (Pipeline) CPU-GPU Python部署示例
本目录下提供`det_keypoint_unite_infer.py`快速完成多人模型配置 PP-PicoDet + PP-TinyPose 在CPU/GPU,以及GPU上通过TensorRT加速部署的`单图多人关键点检测`示例。执行如下脚本即可完成.**注意**: PP-TinyPose单模型独立部署,请参考[PP-TinyPose 单模型](../README.md)
## 1. 部署环境准备
在部署前,需确认软硬件环境,同时下载预编译部署库,参考[FastDeploy安装文档](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#FastDeploy预编译库安装)安装FastDeploy预编译库。
## 2. 部署模型准备
在部署前,请准备好您所需要运行的推理模型,你可以选择使用[预导出的推理模型](../../README.md)或者[自行导出PaddleDetection部署模型](../../README.md)。
## 3. 运行部署示例
```bash
# 下载部署示例代码
git clone https://github.com/PaddlePaddle/PaddleDetection.git
cd PaddleDetection/deploy/fastdeploy/kunlunxin/python/det_keypoint_unite
# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到develop分支
# git checkout develop
# 下载PP-TinyPose模型文件和测试图片
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_TinyPose_256x192_infer.tgz
tar -xvf PP_TinyPose_256x192_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_PicoDet_V2_S_Pedestrian_320x320_infer.tgz
tar -xvf PP_PicoDet_V2_S_Pedestrian_320x320_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/000000018491.jpg
# 运行部署示例
python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image_file 000000018491.jpg
```
运行完成可视化结果如下图所示
<div align="center">
<img src="https://user-images.githubusercontent.com/16222477/196393343-eeb6b68f-0bc6-4927-871f-5ac610da7293.jpeg", width=640px, height=427px />
</div>
- 关于如何通过FastDeploy使用更多不同的推理后端,以及如何使用不同的硬件,请参考文档:[如何切换模型推理后端引擎](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md)
## 4. 部署示例选项说明
|参数|含义|默认值|
|---|---|---|
|--tinypose_model_dir|指定关键点模型文件夹所在的路径|None|
|--det_model_dir|指定目标模型文件夹所在的路径|None|
|--image_file|指定测试图片所在的路径|None|
## 5. PPTinyPose 模型串联 Python接口
```python
fd.pipeline.PPTinyPose(det_model=None, pptinypose_model=None)
```
PPTinyPose Pipeline 模型加载和初始化,其中det_model是使用`fd.vision.detection.PicoDet`初始化的检测模型,pptinypose_model是使用`fd.vision.keypointdetection.PPTinyPose`初始化的关键点检测模型。
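A minimal usage sketch of this pipeline interface, assuming the model directories downloaded in the steps above and the default runtime options (add a `runtime_option` per the linked backend guides when deploying on XPU/GPU):

```python
import cv2
import fastdeploy as fd

det_model = fd.vision.detection.PicoDet(
    "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdmodel",
    "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdiparams",
    "PP_PicoDet_V2_S_Pedestrian_320x320_infer/infer_cfg.yml")
tinypose_model = fd.vision.keypointdetection.PPTinyPose(
    "PP_TinyPose_256x192_infer/model.pdmodel",
    "PP_TinyPose_256x192_infer/model.pdiparams",
    "PP_TinyPose_256x192_infer/infer_cfg.yml")

# Detected persons are cropped and fed to the keypoint model internally.
pipeline = fd.pipeline.PPTinyPose(det_model=det_model, pptinypose_model=tinypose_model)
result = pipeline.predict(cv2.imread("000000018491.jpg"))
print(result)
```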
## 6. 更多指南
- [PaddleDetection Python API文档](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/object_detection.html)
- [FastDeploy部署PaddleDetection模型概览](../../../)
- [C++部署](../../cpp)
## 7. 常见问题
- [如何切换模型推理后端引擎](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md)
- [Intel GPU(独立显卡/集成显卡)的使用](https://github.com/PaddlePaddle/FastDeploy/blob/develop/tutorials/intel_gpu/README.md)
- [编译CPU部署库](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/cpu.md)
- [编译GPU部署库](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/gpu.md)
- [编译Jetson部署库](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/jetson.md) | PaddleDetection/deploy/fastdeploy/kunlunxin/python/det_keypoint_unite/README.md/0 | {
"file_path": "PaddleDetection/deploy/fastdeploy/kunlunxin/python/det_keypoint_unite/README.md",
"repo_id": "PaddleDetection",
"token_count": 2077
} | 43 |
[English](README.md) | 简体中文
# PaddleDetection 服务化部署示例
本文档以PP-YOLOE模型(ppyoloe_crn_l_300e_coco)为例,进行详细介绍。其他PaddleDetection模型都已支持服务化部署,只需将下述命令中的模型和配置名字修改成要部署模型的名字。
PaddleDetection模型导出和预训练模型下载请看[PaddleDetection模型部署](../README.md)文档。
## 1. 部署环境准备
在服务化部署前,需确认
- 1. 服务化镜像的软硬件环境要求和镜像拉取命令请参考[FastDeploy服务化部署](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/README_CN.md)
## 2. 启动服务
```bash
#下载部署示例代码
git clone https://github.com/PaddlePaddle/PaddleDetection.git
cd PaddleDetection/deploy/fastdeploy/serving
#下载PPYOLOE模型文件和测试图片
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
tar xvf ppyoloe_crn_l_300e_coco.tgz
# 将配置文件放入预处理目录
mv ppyoloe_crn_l_300e_coco/infer_cfg.yml models/preprocess/1/
# 将模型放入 models/runtime/1目录下, 并重命名为model.pdmodel和model.pdiparams
mv ppyoloe_crn_l_300e_coco/model.pdmodel models/runtime/1/model.pdmodel
mv ppyoloe_crn_l_300e_coco/model.pdiparams models/runtime/1/model.pdiparams
# 将ppdet和runtime中的ppyoloe配置文件重命名成标准的config名字
# 其他模型比如faster_rcnn就将faster_rcnn_config.pbtxt重命名为config.pbtxt
cp models/ppdet/ppyoloe_config.pbtxt models/ppdet/config.pbtxt
cp models/runtime/ppyoloe_runtime_config.pbtxt models/runtime/config.pbtxt
# 注意: 由于mask_rcnn模型多一个输出,需要将后处理目录(models/postprocess)中的mask_config.pbtxt重命名为config.pbtxt
# 拉取fastdeploy镜像(x.y.z为镜像版本号,需替换成fastdeploy版本数字)
# GPU镜像
docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
# CPU镜像
docker pull paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
# 运行容器.容器名字为 fd_serving, 并挂载当前目录为容器的 /serving 目录
nvidia-docker run -it --net=host --name fd_serving --shm-size="1g" -v `pwd`/:/serving registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
# 启动服务(不设置CUDA_VISIBLE_DEVICES环境变量,会拥有所有GPU卡的调度权限)
CUDA_VISIBLE_DEVICES=0 fastdeployserver --model-repository=/serving/models
```
>> **注意**:
>> 由于mask_rcnn模型多一个输出,部署mask_rcnn需要将后处理目录(models/postprocess)中的mask_config.pbtxt重命名为config.pbtxt
>> 拉取镜像请看[服务化部署主文档](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/README_CN.md)
>> 执行fastdeployserver启动服务出现"Address already in use", 请使用`--grpc-port`指定grpc端口号来启动服务,同时更改客户端示例中的请求端口号.
>> 其他启动参数可以使用 fastdeployserver --help 查看
服务启动成功后, 会有以下输出:
```
......
I0928 04:51:15.784517 206 grpc_server.cc:4117] Started GRPCInferenceService at 0.0.0.0:8001
I0928 04:51:15.785177 206 http_server.cc:2815] Started HTTPService at 0.0.0.0:8000
I0928 04:51:15.826578 206 http_server.cc:167] Started Metrics Service at 0.0.0.0:8002
```
## 3. 客户端请求
在物理机器中执行以下命令,发送grpc请求并输出结果
```
#下载测试图片
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
#安装客户端依赖
python3 -m pip install tritonclient[all]
# 发送请求
python3 paddledet_grpc_client.py
```
发送请求成功后,会返回json格式的检测结果并打印输出:
```
output_name: DET_RESULT
[[159.93016052246094, 82.35527038574219, 199.8546600341797, 164.68682861328125],
... ...,
[60.200584411621094, 123.73260498046875, 108.83859252929688, 169.07467651367188]]
```
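For reference, a minimal HTTP variant of the same request is sketched below. It assumes the default HTTP port 8000 shown in the server log; the input tensor name is read from the model metadata instead of being hard-coded, and `DET_RESULT` matches the output name above:

```python
import cv2
import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")
meta = client.get_model_metadata("ppdet", "1")
input_name = meta["inputs"][0]["name"]

im = np.array([cv2.imread("000000014439.jpg")])          # batch of 1 BGR image, uint8
inp = httpclient.InferInput(input_name, list(im.shape), "UINT8")
inp.set_data_from_numpy(im)
out = httpclient.InferRequestedOutput("DET_RESULT")

result = client.infer("ppdet", inputs=[inp], outputs=[out], model_version="1")
print(result.as_numpy("DET_RESULT"))
```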
## 4. 配置修改
当前默认配置在GPU上运行Paddle引擎, 如果要在CPU或其他推理引擎上运行。 需要修改`models/runtime/config.pbtxt`中配置,详情请参考[配置文档](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/docs/zh_CN/model_configuration.md)
## 5. 使用VisualDL进行可视化部署
可以使用VisualDL进行[Serving可视化部署](https://github.com/PaddlePaddle/FastDeploy/blob/develop/serving/docs/zh_CN/vdl_management.md),上述启动服务、配置修改以及客户端请求的操作都可以基于VisualDL进行。
通过VisualDL的可视化界面对PaddleDetection进行服务化部署只需要如下三步:
```text
1. 载入模型库:./vision/detection/paddledetection/serving/models
2. 下载模型资源文件:点击preprocess模型,点击版本号1添加预训练模型,选择检测模型ppyoloe_crn_l_300e_coco进行下载,此时preprocess中将会有资源文件infer_cfg.yml。点击runtime模型,点击版本号1添加预训练模型,选择检测模型ppyoloe_crn_l_300e_coco进行下载,此时runtime中将会有资源文件model.pdmodel和model.pdiparams。
3. 设置启动配置文件:点击ensemble配置按钮,选择配置文件ppyoloe_config.pbtxt,并设为启动配置文件。点击runtime模型,选择配置文件ppyoloe_runtime_config.pbtxt,并设为启动配置文件。
4. 启动服务:点击启动服务按钮,输入启动参数。
```
<p align="center">
<img src="https://user-images.githubusercontent.com/22424850/211710983-2d1f1427-6738-409d-903b-2b4e4ab6cbfc.gif" width="100%"/>
</p>
| PaddleDetection/deploy/fastdeploy/serving/README.md/0 | {
"file_path": "PaddleDetection/deploy/fastdeploy/serving/README.md",
"repo_id": "PaddleDetection",
"token_count": 3207
} | 44 |
import logging
import numpy as np
import time
from typing import Optional
import cv2
import json
from tritonclient import utils as client_utils
from tritonclient.grpc import InferenceServerClient, InferInput, InferRequestedOutput, service_pb2_grpc, service_pb2
LOGGER = logging.getLogger("run_inference_on_triton")
class SyncGRPCTritonRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
verbose=False,
resp_wait_s: Optional[float]=None, ):
self._server_url = server_url
self._model_name = model_name
self._model_version = model_version
self._verbose = verbose
self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
self._client = InferenceServerClient(
self._server_url, verbose=self._verbose)
error = self._verify_triton_state(self._client)
if error:
raise RuntimeError(
f"Could not communicate to Triton Server: {error}")
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} "
f"are up and ready!")
model_config = self._client.get_model_config(self._model_name,
self._model_version)
model_metadata = self._client.get_model_metadata(self._model_name,
self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
for tm in model_metadata.inputs:
print("tm:", tm)
self._inputs = {tm.name: tm for tm in model_metadata.inputs}
self._input_names = list(self._inputs)
self._outputs = {tm.name: tm for tm in model_metadata.outputs}
self._output_names = list(self._outputs)
self._outputs_req = [
InferRequestedOutput(name) for name in self._outputs
]
def Run(self, inputs):
"""
Args:
inputs: list, Each value corresponds to an input name of self._input_names
Returns:
results: dict, {name : numpy.array}
"""
infer_inputs = []
for idx, data in enumerate(inputs):
infer_input = InferInput(self._input_names[idx], data.shape,
"UINT8")
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
results = self._client.infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=self._outputs_req,
client_timeout=self._response_wait_t, )
results = {name: results.as_numpy(name) for name in self._output_names}
return results
def _verify_triton_state(self, triton_client):
if not triton_client.is_server_live():
return f"Triton server {self._server_url} is not live"
elif not triton_client.is_server_ready():
return f"Triton server {self._server_url} is not ready"
elif not triton_client.is_model_ready(self._model_name,
self._model_version):
return f"Model {self._model_name}:{self._model_version} is not ready"
return None
if __name__ == "__main__":
model_name = "ppdet"
model_version = "1"
url = "localhost:8001"
runner = SyncGRPCTritonRunner(url, model_name, model_version)
im = cv2.imread("000000014439.jpg")
im = np.array([im, ])
# batch input
# im = np.array([im, im, im])
for i in range(1):
result = runner.Run([im, ])
for name, values in result.items():
print("output_name:", name)
# values is batch
for value in values:
value = json.loads(value)
print(value['boxes'])
| PaddleDetection/deploy/fastdeploy/serving/paddledet_grpc_client.py/0 | {
"file_path": "PaddleDetection/deploy/fastdeploy/serving/paddledet_grpc_client.py",
"repo_id": "PaddleDetection",
"token_count": 1942
} | 45 |
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "json/json.h"
namespace PaddleDetection {
// Object for storing all preprocessed data
class ImageBlob {
public:
// image width and height
std::vector<float> im_shape_;
// Buffer for image data after preprocessing
std::vector<float> im_data_;
// in net data shape(after pad)
std::vector<float> in_net_shape_;
// Evaluation image width and height
// std::vector<float> eval_im_size_f_;
// Scale factor for image size to origin image size
std::vector<float> scale_factor_;
};
// Abstraction of preprocessing operation class
class PreprocessOp {
public:
virtual void Init(const Json::Value& item) = 0;
virtual void Run(cv::Mat* im, ImageBlob* data) = 0;
};
class InitInfo : public PreprocessOp {
public:
virtual void Init(const Json::Value& item) {}
virtual void Run(cv::Mat* im, ImageBlob* data);
};
class NormalizeImage : public PreprocessOp {
public:
virtual void Init(const Json::Value& item) {
mean_.clear();
scale_.clear();
for (auto tmp : item["mean"]) {
mean_.emplace_back(tmp.as<float>());
}
for (auto tmp : item["std"]) {
scale_.emplace_back(tmp.as<float>());
}
is_scale_ = item["is_scale"].as<bool>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
// CHW or HWC
std::vector<float> mean_;
std::vector<float> scale_;
bool is_scale_;
};
class Permute : public PreprocessOp {
public:
virtual void Init(const Json::Value& item) {}
virtual void Run(cv::Mat* im, ImageBlob* data);
};
class Resize : public PreprocessOp {
public:
virtual void Init(const Json::Value& item) {
interp_ = item["interp"].as<int>();
// max_size_ = item["target_size"].as<int>();
keep_ratio_ = item["keep_ratio"].as<bool>();
target_size_.clear();
for (auto tmp : item["target_size"]) {
target_size_.emplace_back(tmp.as<int>());
}
}
// Compute best resize scale for x-dimension, y-dimension
std::pair<float, float> GenerateScale(const cv::Mat& im);
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
int interp_;
bool keep_ratio_;
std::vector<int> target_size_;
std::vector<int> in_net_shape_;
};
// Models with FPN need input shape % stride == 0
class PadStride : public PreprocessOp {
public:
virtual void Init(const Json::Value& item) {
stride_ = item["stride"].as<int>();
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
int stride_;
};
class TopDownEvalAffine : public PreprocessOp {
public:
virtual void Init(const Json::Value& item) {
trainsize_.clear();
for (auto tmp : item["trainsize"]) {
trainsize_.emplace_back(tmp.as<int>());
}
}
virtual void Run(cv::Mat* im, ImageBlob* data);
private:
int interp_ = 1;
std::vector<int> trainsize_;
};
void CropImg(cv::Mat& img,
cv::Mat& crop_img,
std::vector<int>& area,
std::vector<float>& center,
std::vector<float>& scale,
float expandratio = 0.15);
class Preprocessor {
public:
void Init(const Json::Value& config_node) {
// initialize image info at first
ops_["InitInfo"] = std::make_shared<InitInfo>();
for (const auto& item : config_node) {
auto op_name = item["type"].as<std::string>();
ops_[op_name] = CreateOp(op_name);
ops_[op_name]->Init(item);
}
}
std::shared_ptr<PreprocessOp> CreateOp(const std::string& name) {
if (name == "Resize") {
return std::make_shared<Resize>();
} else if (name == "Permute") {
return std::make_shared<Permute>();
} else if (name == "NormalizeImage") {
return std::make_shared<NormalizeImage>();
} else if (name == "PadStride") {
// use PadStride instead of PadBatch
return std::make_shared<PadStride>();
} else if (name == "TopDownEvalAffine") {
return std::make_shared<TopDownEvalAffine>();
}
std::cerr << "can not find function of OP: " << name
<< " and return: nullptr" << std::endl;
return nullptr;
}
void Run(cv::Mat* im, ImageBlob* data);
public:
static const std::vector<std::string> RUN_ORDER;
private:
std::unordered_map<std::string, std::shared_ptr<PreprocessOp>> ops_;
};
} // namespace PaddleDetection
| PaddleDetection/deploy/lite/include/preprocess_op.h/0 | {
"file_path": "PaddleDetection/deploy/lite/include/preprocess_op.h",
"repo_id": "PaddleDetection",
"token_count": 1893
} | 46 |
[English](pphuman_action_en.md) | 简体中文
# PP-Human行为识别模块
## 目录
- [基于骨骼点的行为识别](#基于骨骼点的行为识别)
- [基于图像分类的行为识别](#基于图像分类的行为识别)
- [基于检测的行为识别](#基于检测的行为识别)
- [基于行人轨迹的行为识别](#基于行人轨迹的行为识别)
- [基于视频分类的行为识别](#基于视频分类的行为识别)
行为识别在智慧社区,安防监控等方向具有广泛应用,根据行为的不同,PP-Human中集成了基于视频分类、基于检测、基于图像分类,基于行人轨迹以及基于骨骼点的行为识别模块,方便用户根据需求进行选择。
## 基于骨骼点的行为识别
应用行为:摔倒识别
<div align="center">
<img src="https://user-images.githubusercontent.com/22989727/205582385-08a1b6ae-9b1b-465a-ac25-d6427571eb56.gif" width='600'/><br>
<center>数据来源及版权归属:天覆科技,感谢提供并开源实际场景数据,仅限学术研究使用</center>
</div>
### 模型库
基于骨骼点的行为识别包含行人检测/跟踪,关键点检测和摔倒行为识别三个模型,首先需要下载以下预训练模型
| 任务 | 算法 | 精度 | 预测速度(ms) | 模型权重 | 预测部署模型 |
|:---------------------|:---------:|:------:|:------:| :------: |:---------------------------------------------------------------------------------: |
| 行人检测/跟踪 | PP-YOLOE | mAP: 56.3 <br> MOTA: 72.0 | 检测: 16.2ms <br> 跟踪:22.3ms |[下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.pdparams) |[下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip) |
| 关键点识别 | HRNet | AP: 87.1 | 单人 2.9ms |[下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/dark_hrnet_w32_256x192.pdparams) |[下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/dark_hrnet_w32_256x192.zip)|
| 摔倒行为识别 | ST-GCN | 准确率: 96.43 | 单人 2.7ms | - |[下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/STGCN.zip) |
注:
1. 检测/跟踪模型精度为[MOT17](https://motchallenge.net/),[CrowdHuman](http://www.crowdhuman.org/),[HIEVE](http://humaninevents.org/)和部分业务数据融合训练测试得到。
2. 关键点模型使用[COCO](https://cocodataset.org/),[UAV-Human](https://github.com/SUTDCV/UAV-Human)和部分业务数据融合训练, 精度在业务数据测试集上得到。
3. 摔倒行为识别模型使用[NTU-RGB+D](https://rose1.ntu.edu.sg/dataset/actionRecognition/),[UR Fall Detection Dataset](http://fenix.univ.rzeszow.pl/~mkepski/ds/uf.html)和部分业务数据融合训练,精度在业务数据测试集上得到。
4. 预测速度为NVIDIA T4 机器上使用TensorRT FP16时的速度, 速度包含数据预处理、模型预测、后处理全流程。
### 配置说明
[配置文件](../../config/infer_cfg_pphuman.yml)中与行为识别相关的参数如下:
```
SKELETON_ACTION: # 基于骨骼点的行为识别模型配置
model_dir: output_inference/STGCN # 模型所在路径
batch_size: 1 # 预测批大小。 当前仅支持为1进行推理
max_frames: 50 # 动作片段对应的帧数。在行人ID对应时序骨骼点结果时达到该帧数后,会通过行为识别模型判断该段序列的动作类型。与训练设置一致时效果最佳。
display_frames: 80 # 显示帧数。当预测结果为摔倒时,在对应人物ID中显示状态的持续时间。
coord_size: [384, 512] # 坐标统一缩放到的尺度大小。与训练设置一致时效果最佳。
enable: False # 是否开启该功能
```
### 使用方法
1. 从`模型库`中下载`行人检测/跟踪`、`关键点识别`、`摔倒行为识别`三个预测部署模型并解压到```./output_inference```路径下;默认自动下载模型,如果手动下载,需要修改模型文件夹为模型存放路径。
2. 目前行为识别模块仅支持视频输入,根据期望开启的行为识别方案类型,设置infer_cfg_pphuman.yml中`SKELETON_ACTION`的enable: True, 然后启动命令如下:
```bash
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml \
--video_file=test_video.mp4 \
--device=gpu \
```
3. 若修改模型路径,有以下两种方式:
- ```./deploy/pipeline/config/infer_cfg_pphuman.yml```下可以配置不同模型路径,关键点模型和摔倒行为识别模型分别对应`KPT`和`SKELETON_ACTION`字段,修改对应字段下的路径为实际期望的路径即可。
- 命令行中--config后面紧跟着增加`-o KPT.model_dir=xxx SKELETON_ACTION.model_dir=xxx `修改模型路径:
```bash
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml \
-o KPT.model_dir=./dark_hrnet_w32_256x192 SKELETON_ACTION.model_dir=./STGCN \
--video_file=test_video.mp4 \
--device=gpu
```
4. 启动命令中的完整参数说明,请参考[参数说明](./PPHuman_QUICK_STARTED.md)。
### 方案说明
1. 使用多目标跟踪获取视频输入中的行人检测框及跟踪ID序号,模型方案为PP-YOLOE,详细文档参考[PP-YOLOE](../../../../configs/ppyoloe/README_cn.md),跟踪方案为OC-SORT,详细文档参考[OC-SORT](../../../../configs/mot/ocsort)。
2. 通过行人检测框的坐标在输入视频的对应帧中截取每个行人。
3. 使用[关键点识别模型](../../../../configs/keypoint/hrnet/dark_hrnet_w32_256x192.yml)得到对应的17个骨骼特征点。骨骼特征点的顺序及类型与COCO一致,详见[如何准备关键点数据集](../../../../docs/tutorials/data/PrepareKeypointDataSet.md)中的`COCO数据集`部分。
4. 每个跟踪ID对应的目标行人各自累计骨骼特征点结果,组成该人物的时序关键点序列。当累计到预定帧数或跟踪丢失后,使用行为识别模型判断时序关键点序列的动作类型。当前版本模型支持摔倒行为的识别,预测得到的`class id`对应关系为:
```
0: 摔倒,
1: 其他
```
- 摔倒行为识别模型使用了[ST-GCN](https://arxiv.org/abs/1801.07455),并基于[PaddleVideo](https://github.com/PaddlePaddle/PaddleVideo/blob/develop/docs/zh-CN/model_zoo/recognition/stgcn.md)套件完成模型训练。
## 基于图像分类的行为识别
应用行为:打电话识别
<div align="center">
<img src="https://user-images.githubusercontent.com/22989727/205596971-d92fd24e-977a-4742-91cc-ce5b4802473c.gif" width='600'/><br>
<center>数据来源及版权归属:天覆科技,感谢提供并开源实际场景数据,仅限学术研究使用</center>
</div>
### 模型库
基于图像分类的行为识别包含行人检测/跟踪,打电话识别两个模型,首先需要下载以下预训练模型
| 任务 | 算法 | 精度 | 预测速度(ms) | 模型权重 | 预测部署模型 |
|:---------------------|:---------:|:------:|:------:| :------: |:---------------------------------------------------------------------------------: |
| 行人检测/跟踪 | PP-YOLOE | mAP: 56.3 <br> MOTA: 72.0 | 检测: 16.2ms <br> 跟踪:22.3ms |[下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.pdparams) |[下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip) |
| 打电话识别 | PP-HGNet | 准确率: 86.85 | 单人 2.94ms | [下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/PPHGNet_tiny_calling_halfbody.pdparams) | [下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/PPHGNet_tiny_calling_halfbody.zip) |
注:
1. 检测/跟踪模型精度为[MOT17](https://motchallenge.net/),[CrowdHuman](http://www.crowdhuman.org/),[HIEVE](http://humaninevents.org/)和部分业务数据融合训练测试得到。
2. 打电话行为识别模型使用[UAV-Human](https://github.com/SUTDCV/UAV-Human)的打电话行为部分进行训练和测试。
3. 预测速度为NVIDIA T4 机器上使用TensorRT FP16时的速度, 速度包含数据预处理、模型预测、后处理全流程。
### 配置说明
[配置文件](../../config/infer_cfg_pphuman.yml)中相关的参数如下:
```
ID_BASED_CLSACTION: # 基于分类的行为识别模型配置
model_dir: output_inference/PPHGNet_tiny_calling_halfbody # 模型所在路径
batch_size: 8 # 预测批大小
threshold: 0.45 #识别为对应行为的阈值
display_frames: 80 # 显示帧数。当识别到对应动作时,在对应人物ID中显示状态的持续时间。
enable: False # 是否开启该功能
```
### 使用方法
1. 从`模型库`中下载`行人检测/跟踪`、`打电话行为识别`两个预测部署模型并解压到`./output_inference`路径下;默认自动下载模型,如果手动下载,需要修改模型文件夹为模型存放路径。
2. 修改配置文件`deploy/pipeline/config/infer_cfg_pphuman.yml`中`ID_BASED_CLSACTION`下的`enable`为`True`;
3. 仅支持输入视频,启动命令如下:
```
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml \
--video_file=test_video.mp4 \
--device=gpu
```
4. 启动命令中的完整参数说明,请参考[参数说明](./PPHuman_QUICK_STARTED.md)。
### 方案说明
1. 使用目标检测与多目标跟踪获取视频输入中的行人检测框及跟踪ID序号,模型方案为PP-YOLOE,详细文档参考[PP-YOLOE](../../../../configs/ppyoloe/README_cn.md),跟踪方案为OC-SORT,详细文档参考[OC-SORT](../../../../configs/mot/ocsort)。
2. 通过行人检测框的坐标在输入视频的对应帧中截取每个行人。
3. 通过在帧级别的行人图像通过图像分类的方式实现。当图片所属类别为对应行为时,即认为在一定时间段内该人物处于该行为状态中。该任务使用[PP-HGNet](https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/PP-HGNet.md)实现,当前版本模型支持打电话行为的识别,预测得到的`class id`对应关系为:
```
0: 打电话,
1: 其他
```
- 基于分类的行为识别基于[PaddleClas](https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/PP-HGNet.md#3.3)完成模型训练。
## 基于检测的行为识别
应用行为:吸烟识别
<div align="center">
<img src="https://user-images.githubusercontent.com/22989727/205599300-380c3805-63d6-43cc-9b77-2687b1328d7b.gif" width='600'/><br>
<center>数据来源及版权归属:天覆科技,感谢提供并开源实际场景数据,仅限学术研究使用</center>
</div>
### 模型库
在这里,我们提供了行人检测/跟踪、吸烟行为识别的预训练模型,用户可以直接下载使用。
| 任务 | 算法 | 精度 | 预测速度(ms) | 模型权重 | 预测部署模型 |
|:---------------------|:---------:|:------:|:------:| :------: |:---------------------------------------------------------------------------------: |
| 行人检测/跟踪 | PP-YOLOE | mAP: 56.3 <br> MOTA: 72.0 | 检测: 16.2ms <br> 跟踪:22.3ms |[下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.pdparams) |[下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip) |
| 吸烟行为识别 | PP-YOLOE | mAP: 39.7 | 单人 2.0ms | [下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/ppyoloe_crn_s_80e_smoking_visdrone.pdparams) | [下载链接](https://bj.bcebos.com/v1/paddledet/models/pipeline/ppyoloe_crn_s_80e_smoking_visdrone.zip) |
注:
1. 检测/跟踪模型精度为[MOT17](https://motchallenge.net/),[CrowdHuman](http://www.crowdhuman.org/),[HIEVE](http://humaninevents.org/)和部分业务数据融合训练测试得到。
2. 抽烟行为识别模型使用业务数据进行训练和测试。
3. 预测速度为NVIDIA T4 机器上使用TensorRT FP16时的速度, 速度包含数据预处理、模型预测、后处理全流程。
### 配置说明
[配置文件](../../config/infer_cfg_pphuman.yml)中相关的参数如下:
```
ID_BASED_DETACTION: # 基于检测的行为识别模型配置
model_dir: output_inference/ppyoloe_crn_s_80e_smoking_visdrone # 模型所在路径
batch_size: 8 # 预测批大小
threshold: 0.4 # 识别为对应行为的阈值
display_frames: 80 # 显示帧数。当识别到对应动作时,在对应人物ID中显示状态的持续时间。
enable: False # 是否开启该功能
```
### 使用方法
1. 从`模型库`中下载`行人检测/跟踪`、`抽烟行为识别`两个预测部署模型并解压到`./output_inference`路径下;默认自动下载模型,如果手动下载,需要修改模型文件夹为模型存放路径。
2. 修改配置文件`deploy/pipeline/config/infer_cfg_pphuman.yml`中`ID_BASED_DETACTION`下的`enable`为`True`;
3. 仅支持输入视频,启动命令如下:
```
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml \
--video_file=test_video.mp4 \
--device=gpu
```
4. 启动命令中的完整参数说明,请参考[参数说明](./PPHuman_QUICK_STARTED.md)。
### 方案说明
1. 使用目标检测与多目标跟踪获取视频输入中的行人检测框及跟踪ID序号,模型方案为PP-YOLOE,详细文档参考[PP-YOLOE](../../../../configs/ppyoloe/README_cn.md),跟踪方案为OC-SORT,详细文档参考[OC-SORT](../../../../configs/mot/ocsort)。
2. 通过行人检测框的坐标在输入视频的对应帧中截取每个行人。
3. 通过在帧级别的行人图像中检测该行为的典型特定目标实现。当检测到特定目标(在这里即烟头)以后,即认为在一定时间段内该人物处于该行为状态中。该任务使用[PP-YOLOE](../../../../configs/ppyoloe/README_cn.md)实现,当前版本模型支持吸烟行为的识别,预测得到的`class id`对应关系为:
```
0: 吸烟,
1: 其他
```
## 基于行人轨迹的行为识别
应用行为:闯入识别
<div align="center">
<img src="https://user-images.githubusercontent.com/22989727/178769370-03ab1965-cfd1-401b-9902-82620a06e43c.gif" width='600'/>
</div>
具体使用请参照[PP-Human检测跟踪模块](pphuman_mot.md)的`5. 区域闯入判断和计数`。
### 方案说明
1. 使用多目标跟踪获取视频输入中的行人检测框及跟踪ID序号,模型方案为PP-YOLOE,详细文档参考[PP-YOLOE](../../../../configs/ppyoloe/README_cn.md),跟踪方案为OC-SORT,详细文档参考[OC-SORT](../../../../configs/mot/ocsort)。
2. 通过行人检测框的下边界中点在相邻帧位于用户所选区域的内外位置,来识别是否闯入所选区域。
## 基于视频分类的行为识别
应用行为:打架识别
<div align="center">
<img src="https://user-images.githubusercontent.com/22989727/205597198-8b4333b3-6c39-472c-a25c-018dac908867.gif" width='600'/><br>
<center>数据来源及版权归属:Surveillance Camera Fight Dataset。</center>
</div>
该方案关注的场景为监控摄像头下的打架行为识别。打架行为涉及多人,基于骨骼点技术的方案更适用于单人的行为识别。此外,打架行为对时序信息依赖较强,基于检测和分类的方案也不太适用。由于监控场景背景复杂,人的密集程度、光线、拍摄角度等都会对识别造成影响,本方案采用基于视频分类的方式判断视频中是否存在打架行为。针对摄像头距离人较远的情况,通过增大输入图像分辨率优化。由于训练数据有限,采用数据增强的方式提升模型的泛化性能。
### 模型库
在这里,我们提供了打架识别的预训练模型,用户可以直接下载使用。
| 任务 | 算法 | 精度 | 预测速度(ms) | 模型权重 | 预测部署模型 |
|:---------------------|:---------:|:------:|:------:| :------: |:---------------------------------------------------------------------------------: |
| 打架识别 | PP-TSM | 准确率:89.06% | 2s视频 128ms | [下载链接](https://videotag.bj.bcebos.com/PaddleVideo-release2.3/ppTSM_fight.pdparams) | [下载链接](https://videotag.bj.bcebos.com/PaddleVideo-release2.3/ppTSM_fight.zip) |
注:
1. 打架识别模型基于6个公开数据集训练得到:Surveillance Camera Fight Dataset、A Dataset for Automatic Violence Detection in Videos、Hockey Fight Detection Dataset、Video Fight Detection Dataset、Real Life Violence Situations Dataset、UBI Abnormal Event Detection Dataset。
2. 预测速度为NVIDIA T4 机器上使用TensorRT FP16时的速度, 速度包含数据预处理、模型预测、后处理全流程。
### 配置说明
[配置文件](../../config/infer_cfg_pphuman.yml)中与行为识别相关的参数如下:
```
VIDEO_ACTION: # 基于视频分类的行为识别模型配置
model_dir: output_inference/ppTSM # 模型所在路径
batch_size: 1 # 预测批大小。当前仅支持为1进行推理
frame_len: 8 # 累计抽样帧数量,达到该数量后执行一次识别
sample_freq: 7 # 抽样频率,即间隔多少帧抽样一帧
short_size: 340 # 视频帧尺度变换最小边的长度
target_size: 320 # 目标视频帧的大小
enable: False # 是否开启该功能
```
### 使用方法
1. 从上表链接中下载`打架识别`任务的预测部署模型并解压到`./output_inference`路径下;默认自动下载模型,如果手动下载,需要修改模型文件夹为模型存放路径。
2. 修改配置文件`deploy/pphuman/config/infer_cfg_pphuman.yml`中`VIDEO_ACTION`下的`enable`为`True`;
3. 仅支持输入视频,启动命令如下:
```
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml \
--video_file=test_video.mp4 \
--device=gpu
```
4. 启动命令中的完整参数说明,请参考[参数说明](./PPHuman_QUICK_STARTED.md)。
### 方案说明
目前打架识别模型使用的是[PP-TSM](https://github.com/PaddlePaddle/PaddleVideo/blob/develop/docs/zh-CN/model_zoo/recognition/pp-tsm.md),并在PP-TSM视频分类模型训练流程的基础上修改适配,完成模型训练。对于输入的视频或者视频流,进行等间隔抽帧,当视频帧累计到指定数目时,输入到视频分类模型中判断是否存在打架行为。
## 参考文献
```
@inproceedings{stgcn2018aaai,
title = {Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition},
author = {Sijie Yan and Yuanjun Xiong and Dahua Lin},
booktitle = {AAAI},
year = {2018},
}
```
| PaddleDetection/deploy/pipeline/docs/tutorials/pphuman_action.md/0 | {
"file_path": "PaddleDetection/deploy/pipeline/docs/tutorials/pphuman_action.md",
"repo_id": "PaddleDetection",
"token_count": 11948
} | 47 |
[English](ppvehicle_press_en.md) | 简体中文
# PP-Vehicle压实线识别模块
车辆压实线识别在智慧城市,智慧交通等方向具有广泛应用。在PP-Vehicle中,集成了车辆压实线识别模块,可识别车辆是否违章压实线。
| 任务 | 算法 | 精度 | 预测速度 | 下载链接|
|-----------|------|-----------|----------|---------------|
| 车辆检测/跟踪 | PP-YOLOE | mAP 63.9 | 38.67ms | [预测部署模型](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip) |
| 车道线识别 | PP-liteseg | mIou 32.69 | 47 ms | [预测部署模型](https://bj.bcebos.com/v1/paddledet/models/pipeline/pp_lite_stdc2_bdd100k.zip) |
注意:
1. 车辆检测/跟踪模型预测速度是基于NVIDIA T4, 开启TensorRT FP16得到。模型预测速度包含数据预处理、模型预测、后处理部分。
2. 车辆检测/跟踪模型的训练和精度测试均基于[VeRi数据集](https://www.v7labs.com/open-datasets/veri-dataset)。
3. 车道线模型预测速度基于Tesla P40,python端预测,模型预测速度包含数据预处理、模型预测、后处理部分。
4. 车道线模型训练和精度测试均基于[BDD100K-LaneSeg](https://bdd-data.berkeley.edu/portal.html#download)和[Apollo Scape](http://apolloscape.auto/lane_segmentation.html#to_dataset_href),两个数据集车道线分割[标签](https://bj.bcebos.com/v1/paddledet/data/mot/bdd100k/lane_dataset_label.zip)
## 使用方法
### 配置项说明
[配置文件](../../config/infer_cfg_ppvehicle.yml)中与车辆压线相关的参数如下:
```
VEHICLE_PRESSING:
enable: True #是否开启功能
LANE_SEG:
lane_seg_config: deploy/pipeline/config/lane_seg_config.yml #车道线提取配置文件
model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/pp_lite_stdc2_bdd100k.zip #模型文件路径
```
[车道线配置文件](../../config/lane_seg_config.yml)中与车道线提取相关的参数如下:
```
type: PLSLaneseg #选择分割模型
PLSLaneseg:
batch_size: 1 #图片batch_size
device: gpu #选择gpu还是cpu
filter_flag: True #是否过滤水平方向道路线
horizontal_filtration_degree: 23 #过滤水平方向车道线阈值,当分割出来的车道线最大倾斜角与
#最小倾斜角差值小于阈值时,不进行过滤
horizontal_filtering_threshold: 0.25 #确定竖直方向与水平方向分开阈值
#thr = (min_degree+max_degree)*0.25
#根据车道线倾斜角与thr的大小比较,将车道线分为垂直方向与水平方向
```
### 使用命令
1. 从模型库下载`车辆检测/跟踪`, `车道线识别`两个预测部署模型并解压到`./output_inference`路径下;默认会自动下载模型,如果手动下载,需要修改模型文件夹为模型存放路径。
2. 修改配置文件中`VEHICLE_PRESSING`项的`enable: True`,以启用该功能。
3. 图片输入时,启动命令如下(更多命令参数说明,请参考[快速开始-参数说明](./PPVehicle_QUICK_STARTED.md)):
```bash
# 预测单张图片文件
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
-o VEHICLE_PRESSING.enable=true
--image_file=test_image.jpg \
--device=gpu
# 预测包含一张或多张图片的文件夹
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
-o VEHICLE_PRESSING.enable=true
--image_dir=images/ \
--device=gpu
```
4. 视频输入时,启动命令如下:
```bash
#预测单个视频文件
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
-o VEHICLE_PRESSING.enable=true
--video_file=test_video.mp4 \
--device=gpu
#预测包含一个或多个视频的文件夹
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
--video_dir=test_videos/ \
-o VEHICLE_PRESSING.enable=true
--device=gpu
```
5. 若修改模型路径,有以下两种方式:
- 方法一:`./deploy/pipeline/config/infer_cfg_ppvehicle.yml`下可以配置不同模型路径,车道线识别模型修改`LANE_SEG`字段下配置
- 方法二:直接在命令行中增加`-o`,以覆盖配置文件中的默认模型路径:
```bash
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
--video_file=test_video.mp4 \
--device=gpu \
-o VEHICLE_PRESSING.enable=true
LANE_SEG.model_dir=output_inference
```
测试效果如下:
<div width="1000" align="center">
<img src="https://raw.githubusercontent.com/LokeZhou/PaddleDetection/develop/deploy/pipeline/docs/images/vehicle_press.gif"/>
</div>
## 方案说明
1.车道线识别模型使用了[PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg) 的超轻量分割方案。训练样本[标签](https://bj.bcebos.com/v1/paddledet/data/mot/bdd100k/lane_dataset_label.zip)分为4类:
0 背景
1 双黄线
2 实线
3 虚线
车辆压线分析过滤虚线类;
2.车道线通过对分割结果聚类得到,且默认过滤水平方向车道线,若不过滤可在[车道线配置文件](../../config/lane_seg_config.yml)修改`filter_flag`参数;
3.车辆压线判断条件:车辆的检测框底边线与车道线是否有交点;
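A minimal sketch of that bottom-edge/lane-line intersection test (a plain segment-orientation check on hypothetical coordinates; the deployed module runs this against the clustered lane polylines described above and ignores collinear edge cases):

```python
def segments_intersect(p1, p2, q1, q2):
    """True if segment p1-p2 strictly crosses segment q1-q2."""
    def cross(o, a, b):
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
    d1, d2 = cross(q1, q2, p1), cross(q1, q2, p2)
    d3, d4 = cross(p1, p2, q1), cross(p1, p2, q2)
    return (d1 * d2 < 0) and (d3 * d4 < 0)

def is_pressing(vehicle_box, lane_polyline):
    """Check the bottom edge of a vehicle box (x1, y1, x2, y2) against every
    segment of a lane polyline [(x, y), ...]."""
    x1, y1, x2, y2 = vehicle_box
    bottom = ((x1, y2), (x2, y2))
    return any(segments_intersect(*bottom, lane_polyline[i], lane_polyline[i + 1])
               for i in range(len(lane_polyline) - 1))

print(is_pressing((100, 200, 300, 420), [(220, 100), (180, 500)]))  # True
```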
**性能优化措施**
1.因摄像头视角原因,可以根据实际情况决定是否过滤水平方向车道线;
| PaddleDetection/deploy/pipeline/docs/tutorials/ppvehicle_press.md/0 | {
"file_path": "PaddleDetection/deploy/pipeline/docs/tutorials/ppvehicle_press.md",
"repo_id": "PaddleDetection",
"token_count": 4158
} | 48 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class KeyPointSequence(object):
def __init__(self, max_size=100):
self.frames = 0
self.kpts = []
self.bboxes = []
self.max_size = max_size
def save(self, kpt, bbox):
self.kpts.append(kpt)
self.bboxes.append(bbox)
self.frames += 1
if self.frames == self.max_size:
return True
return False
class KeyPointBuff(object):
def __init__(self, max_size=100):
self.flag_track_interrupt = False
self.keypoint_saver = dict()
self.max_size = max_size
self.id_to_pop = set()
self.flag_to_pop = False
def get_state(self):
return self.flag_to_pop
def update(self, kpt_res, mot_res):
kpts = kpt_res.get('keypoint')[0]
bboxes = kpt_res.get('bbox')
mot_bboxes = mot_res.get('boxes')
updated_id = set()
for idx in range(len(kpts)):
tracker_id = mot_bboxes[idx, 0]
updated_id.add(tracker_id)
kpt_seq = self.keypoint_saver.get(tracker_id,
KeyPointSequence(self.max_size))
is_full = kpt_seq.save(kpts[idx], bboxes[idx])
self.keypoint_saver[tracker_id] = kpt_seq
#Scene1: result should be popped when frames meet max size
if is_full:
self.id_to_pop.add(tracker_id)
self.flag_to_pop = True
#Scene2: result of a lost tracker should be popped
interrupted_id = set(self.keypoint_saver.keys()) - updated_id
if len(interrupted_id) > 0:
self.flag_to_pop = True
self.id_to_pop.update(interrupted_id)
def get_collected_keypoint(self):
"""
Output (List): List of keypoint results for Skeletonbased Recognition task, where
the format of each element is [tracker_id, KeyPointSequence of tracker_id]
"""
output = []
for tracker_id in self.id_to_pop:
output.append([tracker_id, self.keypoint_saver[tracker_id]])
del (self.keypoint_saver[tracker_id])
self.flag_to_pop = False
self.id_to_pop.clear()
return output
class ActionVisualHelper(object):
def __init__(self, frame_life=20):
self.frame_life = frame_life
self.action_history = {}
def get_visualize_ids(self):
id_detected = self.check_detected()
return id_detected
def check_detected(self):
id_detected = set()
deperate_id = []
for mot_id in self.action_history:
self.action_history[mot_id]["life_remain"] -= 1
if int(self.action_history[mot_id]["class"]) == 0:
id_detected.add(mot_id)
if self.action_history[mot_id]["life_remain"] == 0:
deperate_id.append(mot_id)
for mot_id in deperate_id:
del (self.action_history[mot_id])
return id_detected
def update(self, action_res_list):
for mot_id, action_res in action_res_list:
if mot_id in self.action_history:
if int(action_res["class"]) != 0 and int(self.action_history[
mot_id]["class"]) == 0:
continue
action_info = self.action_history.get(mot_id, {})
action_info["class"] = action_res["class"]
action_info["life_remain"] = self.frame_life
self.action_history[mot_id] = action_info
| PaddleDetection/deploy/pipeline/pphuman/action_utils.py/0 | {
"file_path": "PaddleDetection/deploy/pipeline/pphuman/action_utils.py",
"repo_id": "PaddleDetection",
"token_count": 1910
} | 49 |
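A minimal usage sketch of the `KeyPointBuff` defined above, feeding it hypothetical per-frame keypoint and MOT results shaped the way `update()` expects (one tracked person with id 7 and 17 keypoints per frame):

```python
import numpy as np

buff = KeyPointBuff(max_size=50)
for frame_id in range(60):
    # kpt_res['keypoint'][0]: list of (17, 3) keypoints, kpt_res['bbox']: boxes,
    # mot_res['boxes'][i, 0]: tracker id of the i-th person (values are made up).
    kpt_res = {"keypoint": [[np.zeros((17, 3))]], "bbox": [np.array([0, 0, 100, 200])]}
    mot_res = {"boxes": np.array([[7, 0, 0.9, 0, 0, 100, 200]])}
    buff.update(kpt_res, mot_res)
    if buff.get_state():                       # a track reached 50 frames (or was lost)
        for tracker_id, kpt_seq in buff.get_collected_keypoint():
            print(tracker_id, kpt_seq.frames)  # hand the sequence to the ST-GCN model
```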
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import glob
from functools import reduce
import time
import cv2
import numpy as np
import math
import paddle
import sys
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))
sys.path.insert(0, parent_path)
from python.infer import get_test_images
from python.preprocess import preprocess, NormalizeImage, Permute, Resize_Mult32
from pipeline.ppvehicle.vehicle_plateutils import create_predictor, get_infer_gpuid, get_rotate_crop_image, draw_boxes
from pipeline.ppvehicle.vehicleplate_postprocess import build_post_process
from pipeline.cfg_utils import merge_cfg, print_arguments, argsparser
class PlateDetector(object):
def __init__(self, args, cfg):
self.args = args
self.pre_process_list = {
'Resize_Mult32': {
'limit_side_len': cfg['det_limit_side_len'],
'limit_type': cfg['det_limit_type'],
},
'NormalizeImage': {
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'is_scale': True,
},
'Permute': {}
}
postprocess_params = {}
postprocess_params['name'] = 'DBPostProcess'
postprocess_params["thresh"] = 0.3
postprocess_params["box_thresh"] = 0.6
postprocess_params["max_candidates"] = 1000
postprocess_params["unclip_ratio"] = 1.5
postprocess_params["use_dilation"] = False
postprocess_params["score_mode"] = "fast"
self.postprocess_op = build_post_process(postprocess_params)
self.predictor, self.input_tensor, self.output_tensors, self.config = create_predictor(
args, cfg, 'det')
def preprocess(self, im_path):
preprocess_ops = []
for op_type, new_op_info in self.pre_process_list.items():
preprocess_ops.append(eval(op_type)(**new_op_info))
input_im_lst = []
input_im_info_lst = []
im, im_info = preprocess(im_path, preprocess_ops)
input_im_lst.append(im)
input_im_info_lst.append(im_info['im_shape'] / im_info['scale_factor'])
return np.stack(input_im_lst, axis=0), input_im_info_lst
def order_points_clockwise(self, pts):
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def clip_det_res(self, points, img_height, img_width):
for pno in range(points.shape[0]):
points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))
points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
return points
def filter_tag_det_res(self, dt_boxes, image_shape):
img_height, img_width = image_shape[0:2]
dt_boxes_new = []
for box in dt_boxes:
box = self.order_points_clockwise(box)
box = self.clip_det_res(box, img_height, img_width)
rect_width = int(np.linalg.norm(box[0] - box[1]))
rect_height = int(np.linalg.norm(box[0] - box[3]))
if rect_width <= 3 or rect_height <= 3:
continue
dt_boxes_new.append(box)
dt_boxes = np.array(dt_boxes_new)
return dt_boxes
def filter_tag_det_res_only_clip(self, dt_boxes, image_shape):
img_height, img_width = image_shape[0:2]
dt_boxes_new = []
for box in dt_boxes:
box = self.clip_det_res(box, img_height, img_width)
dt_boxes_new.append(box)
dt_boxes = np.array(dt_boxes_new)
return dt_boxes
def predict_image(self, img_list):
st = time.time()
dt_batch_boxes = []
for image in img_list:
img, shape_list = self.preprocess(image)
if img is None:
return None, 0
self.input_tensor.copy_from_cpu(img)
self.predictor.run()
outputs = []
for output_tensor in self.output_tensors:
output = output_tensor.copy_to_cpu()
outputs.append(output)
preds = {}
preds['maps'] = outputs[0]
#self.predictor.try_shrink_memory()
post_result = self.postprocess_op(preds, shape_list)
# print("post_result length:{}".format(len(post_result)))
org_shape = image.shape
dt_boxes = post_result[0]['points']
dt_boxes = self.filter_tag_det_res(dt_boxes, org_shape)
dt_batch_boxes.append(dt_boxes)
et = time.time()
return dt_batch_boxes, et - st
class TextRecognizer(object):
def __init__(self, args, cfg, use_gpu=True):
self.rec_image_shape = cfg['rec_image_shape']
self.rec_batch_num = cfg['rec_batch_num']
word_dict_path = cfg['word_dict_path']
use_space_char = True
postprocess_params = {
'name': 'CTCLabelDecode',
"character_dict_path": word_dict_path,
"use_space_char": use_space_char
}
self.postprocess_op = build_post_process(postprocess_params)
self.predictor, self.input_tensor, self.output_tensors, self.config = \
create_predictor(args, cfg, 'rec')
self.use_onnx = False
def resize_norm_img(self, img, max_wh_ratio):
imgC, imgH, imgW = self.rec_image_shape
assert imgC == img.shape[2]
imgW = int((imgH * max_wh_ratio))
if self.use_onnx:
w = self.input_tensor.shape[3:][0]
if w is not None and w > 0:
imgW = w
h, w = img.shape[:2]
ratio = w / float(h)
if math.ceil(imgH * ratio) > imgW:
resized_w = imgW
else:
resized_w = int(math.ceil(imgH * ratio))
resized_image = cv2.resize(img, (resized_w, imgH))
resized_image = resized_image.astype('float32')
resized_image = resized_image.transpose((2, 0, 1)) / 255
resized_image -= 0.5
resized_image /= 0.5
padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
padding_im[:, :, 0:resized_w] = resized_image
return padding_im
def predict_text(self, img_list):
img_num = len(img_list)
# Calculate the aspect ratio of all text bars
width_list = []
for img in img_list:
width_list.append(img.shape[1] / float(img.shape[0]))
# Sorting can speed up the recognition process
indices = np.argsort(np.array(width_list))
rec_res = [['', 0.0]] * img_num
batch_num = self.rec_batch_num
st = time.time()
for beg_img_no in range(0, img_num, batch_num):
end_img_no = min(img_num, beg_img_no + batch_num)
norm_img_batch = []
imgC, imgH, imgW = self.rec_image_shape
max_wh_ratio = imgW / imgH
# max_wh_ratio = 0
for ino in range(beg_img_no, end_img_no):
h, w = img_list[indices[ino]].shape[0:2]
wh_ratio = w * 1.0 / h
max_wh_ratio = max(max_wh_ratio, wh_ratio)
for ino in range(beg_img_no, end_img_no):
norm_img = self.resize_norm_img(img_list[indices[ino]],
max_wh_ratio)
norm_img = norm_img[np.newaxis, :]
norm_img_batch.append(norm_img)
norm_img_batch = np.concatenate(norm_img_batch)
norm_img_batch = norm_img_batch.copy()
if self.use_onnx:
input_dict = {}
input_dict[self.input_tensor.name] = norm_img_batch
outputs = self.predictor.run(self.output_tensors, input_dict)
preds = outputs[0]
else:
self.input_tensor.copy_from_cpu(norm_img_batch)
self.predictor.run()
outputs = []
for output_tensor in self.output_tensors:
output = output_tensor.copy_to_cpu()
outputs.append(output)
if len(outputs) != 1:
preds = outputs
else:
preds = outputs[0]
rec_result = self.postprocess_op(preds)
for rno in range(len(rec_result)):
rec_res[indices[beg_img_no + rno]] = rec_result[rno]
return rec_res, time.time() - st
class PlateRecognizer(object):
def __init__(self, args, cfg):
use_gpu = args.device.lower() == "gpu"
self.platedetector = PlateDetector(args, cfg)
self.textrecognizer = TextRecognizer(args, cfg, use_gpu=use_gpu)
def get_platelicense(self, image_list):
plate_text_list = []
plateboxes, det_time = self.platedetector.predict_image(image_list)
for idx, boxes_pcar in enumerate(plateboxes):
plate_pcar_list = []
for box in boxes_pcar:
plate_images = get_rotate_crop_image(image_list[idx], box)
plate_texts = self.textrecognizer.predict_text([plate_images])
plate_pcar_list.append(plate_texts)
plate_text_list.append(plate_pcar_list)
return self.check_plate(plate_text_list)
def check_plate(self, text_list):
plate_all = {"plate": []}
for text_pcar in text_list:
platelicense = ""
for text_info in text_pcar:
text = text_info[0][0][0]
if len(text) > 2 and len(text) < 10:
platelicense = self.replace_cn_code(text)
plate_all["plate"].append(platelicense)
return plate_all
def replace_cn_code(self, text):
simcode = {
'浙': 'ZJ-',
'粤': 'GD-',
'京': 'BJ-',
'津': 'TJ-',
'冀': 'HE-',
'晋': 'SX-',
'蒙': 'NM-',
'辽': 'LN-',
'黑': 'HLJ-',
'沪': 'SH-',
'吉': 'JL-',
'苏': 'JS-',
'皖': 'AH-',
'赣': 'JX-',
'鲁': 'SD-',
'豫': 'HA-',
'鄂': 'HB-',
'湘': 'HN-',
'桂': 'GX-',
'琼': 'HI-',
'渝': 'CQ-',
'川': 'SC-',
'贵': 'GZ-',
'云': 'YN-',
'藏': 'XZ-',
'陕': 'SN-',
'甘': 'GS-',
'青': 'QH-',
'宁': 'NX-',
'闽': 'FJ-',
'·': ' '
}
for _char in text:
if _char in simcode:
text = text.replace(_char, simcode[_char])
return text
def main():
cfg = merge_cfg(FLAGS)
print_arguments(cfg)
vehicleplate_cfg = cfg['VEHICLE_PLATE']
detector = PlateRecognizer(FLAGS, vehicleplate_cfg)
# predict from image
img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
for img in img_list:
image = cv2.imread(img)
results = detector.get_platelicense([image])
print(results)
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
FLAGS = parser.parse_args()
FLAGS.device = FLAGS.device.upper()
assert FLAGS.device in ['CPU', 'GPU', 'XPU', 'NPU'
], "device should be CPU, GPU, NPU or XPU"
main()
| PaddleDetection/deploy/pipeline/ppvehicle/vehicle_plate.py/0 | {
"file_path": "PaddleDetection/deploy/pipeline/ppvehicle/vehicle_plate.py",
"repo_id": "PaddleDetection",
"token_count": 6183
} | 50 |
# C++ Inference Deployment
In PaddlePaddle, the inference engine and the training engine are optimized differently under the hood. The inference engine uses AnalysisPredictor, which is optimized specifically for inference: it applies multiple graph optimizations to the model and reduces unnecessary memory copies. For users with high performance requirements when deploying a trained model, we provide inference code that is independent of PaddleDetection and can be integrated directly. The current C++ deployment supports single-camera tracking based on FairMOT, together with pedestrian flow statistics and entrance/exit counting.
It consists of three main steps:
- Prepare the environment
- Export the inference model
- Run C++ inference
## Step 1: Prepare the Environment
Requirements:
- GCC 8.2
- CUDA 10.1/10.2/11.1; CUDNN 7.6/8.1
- CMake 3.0+
- TensorRT 6/7
NVIDIA Jetson users should follow the [Jetson build guide](../../cpp/docs/Jetson_build.md#jetson环境搭建) to install JetPack.
### 1. Download the code
```
git clone https://github.com/PaddlePaddle/PaddleDetection.git
# The C++ deployment code is independent of the rest of the repository
cd deploy/pptracking/cpp
```
### 2. Download or build the PaddlePaddle C++ inference library
Download the inference library that matches your environment; see the [C++ inference library download list](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html).
After downloading and extracting, the `./paddle_inference` directory contains:
```
paddle_inference
├── paddle # Paddle core libraries and headers
|
├── third_party # third-party dependency libraries and headers
|
└── version.txt # version and build information
```
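For example, fetching and unpacking a pre-built library may look like the sketch below; the URL is a placeholder — substitute the package that matches your CUDA/cuDNN/TensorRT versions from the download list above:
```shell
# Placeholder URL: pick the actual package from the C++ inference library download list
wget -O paddle_inference.tgz "<paddle_inference_download_url>"
tar -xzf paddle_inference.tgz
ls paddle_inference   # expect paddle/, third_party/, version.txt
```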
**Note:** If your environment differs from the officially supported ones (e.g. different CUDA, cuDNN or TensorRT versions), if you need to modify the PaddlePaddle source code, or if you want a customized build, you can compile the inference library from source by following the [documentation](https://paddleinference.paddlepaddle.org.cn/user_guides/source_compile.html).
### 3. Build
<details>
<summary>Build on Linux:</summary>
The `cmake` build commands live in `scripts/build.sh`. Adjust the main parameters to match your setup; they are described below:
```
# Whether to use GPU (i.e. whether to use CUDA)
WITH_GPU=ON
# Whether to use MKL or OpenBLAS; set to OFF on TX2
WITH_MKL=OFF
# Whether to integrate TensorRT (only effective when WITH_GPU=ON)
WITH_TENSORRT=ON
# TensorRT include path
TENSORRT_INC_DIR=/path/to/TensorRT/include
# TensorRT lib path
TENSORRT_LIB_DIR=/path/to/TensorRT/lib
# Paddle inference library path
PADDLE_DIR=/path/to/paddle_inference/
# Paddle inference library name
PADDLE_LIB_NAME=libpaddle_inference
# CUDA lib path
CUDA_LIB=/path/to/cuda/lib
# CUDNN lib path
CUDNN_LIB=/path/to/cudnn/lib
# OpenCV path
OPENCV_DIR=/path/to/opencv
```
After setting the main parameters in the script, run the ```build.sh``` script:
```
sh ./scripts/build.sh
```
</details>
<details>
<summary>Build on Windows:</summary>
- Install and configure OpenCV
  1. Download OpenCV 3.4.6 for Windows from the official site, [download link](https://sourceforge.net/projects/opencvlibrary/files/3.4.6/opencv-3.4.6-vc14_vc15.exe/download)
  2. Run the downloaded executable and extract OpenCV to a directory of your choice, e.g. `D:\projects\opencv`
  3. Configure the environment variables as follows (this step can be skipped if you use absolute paths everywhere)
    - My Computer -> Properties -> Advanced system settings -> Environment Variables
    - Find Path in the system variables (create it if missing) and double-click to edit it
    - Add a new entry with the OpenCV path and save, e.g. `D:\projects\opencv\build\x64\vc14\bin`
- Generate the project files with CMake
  Run the following command to generate the project files:
  ```
  cmake . -G "Visual Studio 16 2019" -A x64 -T host=x64 -DWITH_GPU=ON -DWITH_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCUDA_LIB=path_to_cuda_lib -DCUDNN_LIB=path_to_cudnn_lib -DPADDLE_DIR=path_to_paddle_lib -DPADDLE_LIB_NAME=paddle_inference -DOPENCV_DIR=path_to_opencv -DWITH_KEYPOINT=ON
  ```
- Build
  Open `PaddleObjectDetector.sln` in the `cpp` folder with `Visual Studio 2019`, set the build mode to `Release`, and click `Build` -> `Build All`.
  The resulting executable is placed in the `Release` directory.
</details>
**Notes:**
1. On the `TX2` platform, `CUDA` and `CUDNN` must be installed via `JetPack`.
2. OpenCV download scripts are provided for Linux and TX2; for other environments, please install [opencv](https://opencv.org/) yourself.
3. Windows users are recommended to build with Visual Studio 2019.
## Step 2: Export the Inference Model
Export the trained weights to the model format required by the inference library, using ```tools/export_model.py``` in PaddleDetection:
```
python tools/export_model.py -c configs/mot/fairmot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.pdparams
```
By default the inference model is exported to ```output_inference/fairmot_hrnetv2_w18_dlafpn_30e_576x320```, containing ```infer_cfg.yml```, ```model.pdiparams```, ```model.pdiparams.info``` and ```model.pdmodel```.
Exported models can also be downloaded directly from the 'algorithm introduction' part of the [inference model list](../README.md).
## Step 3: C++ Inference
After the steps above, run inference with ```build/main``` (Linux) or ```main.exe``` (Windows). The parameters are listed below:
| Parameter | Description |
| ---- | ---- |
| --track_model_dir | Path of the exported tracking inference model |
| --video_file | Path of the video file to predict |
| --device | Device to run on, one of `CPU/GPU/XPU`, default `CPU` |
| --gpu_id | GPU device id used for inference (default 0) |
| --run_mode | When using GPU, defaults to paddle; options are (paddle/trt_fp32/trt_fp16/trt_int8) |
| --output_dir | Directory for output images, default output |
| --use_mkldnn | Whether to enable MKLDNN acceleration for CPU inference |
| --cpu_threads | Number of CPU threads, default 1 |
| --do_entrance_counting | Whether to count entrance/exit traffic, default False |
| --save_result | Whether to save the tracking results |
Example 1:
```shell
# Test video `test.mp4` on CPU; the model and the test video have been moved into the `build` directory
./main --track_model_dir=./fairmot_hrnetv2_w18_dlafpn_30e_576x320 --video_file=test.mp4
# The visualized video result is saved to output/test.mp4 in the current directory by default
```
Example 2:
```shell
# Test video `test.mp4` on GPU with entrance/exit counting enabled and tracking results saved; the model and the test video have been moved into the `build` directory
./main -video_file=test.mp4 -track_model_dir=./fairmot_dla34_30e_1088x608/ --device=gpu --do_entrance_counting=True --save_result=True
# The visualized video result is saved to `output/test.mp4` in the current directory by default
# Tracking results are saved to `output/mot_output.txt`
# Counting results are saved to `output/flow_statistic.txt`
```
| PaddleDetection/deploy/pptracking/cpp/README.md/0 | {
"file_path": "PaddleDetection/deploy/pptracking/cpp/README.md",
"repo_id": "PaddleDetection",
"token_count": 3966
} | 51 |
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <math.h>
#include <sys/types.h>
#include <algorithm>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>
#ifdef _WIN32
#include <direct.h>
#include <io.h>
#else
#include <stdarg.h>
#include <sys/stat.h>
#endif
#include <gflags/gflags.h>
#include "include/pipeline.h"
DEFINE_string(video_file, "", "Path of input video.");
DEFINE_string(video_other_file,
"",
"Path of other input video used for MTMCT.");
DEFINE_string(device,
"CPU",
"Choose the device you want to run, it can be: CPU/GPU/XPU, "
"default is CPU.");
DEFINE_double(threshold, 0.5, "Threshold of score.");
DEFINE_string(output_dir, "output", "Directory of output visualization files.");
DEFINE_string(run_mode,
"paddle",
"Mode of running(paddle/trt_fp32/trt_fp16/trt_int8)");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute");
DEFINE_bool(use_mkldnn, false, "Whether use mkldnn with CPU");
DEFINE_int32(cpu_threads, 1, "Num of threads with CPU");
DEFINE_bool(trt_calib_mode,
false,
"If the model is produced by TRT offline quantitative calibration, "
"trt_calib_mode need to set True");
DEFINE_bool(tiny_obj, false, "Whether tracking tiny object");
DEFINE_bool(do_entrance_counting,
false,
"Whether counting the numbers of identifiers entering "
"or getting out from the entrance.");
DEFINE_int32(secs_interval, 10, "The seconds interval to count after tracking");
DEFINE_bool(save_result, false, "Whether saving result after tracking");
DEFINE_string(
scene,
"",
"scene of tracking system, it can be : pedestrian/vehicle/multiclass");
DEFINE_bool(is_mtmct, false, "Whether use multi-target multi-camera tracking");
DEFINE_string(track_model_dir, "", "Path of tracking model");
DEFINE_string(det_model_dir, "", "Path of detection model");
DEFINE_string(reid_model_dir, "", "Path of reid model");
static std::string DirName(const std::string& filepath) {
auto pos = filepath.rfind(OS_PATH_SEP);
if (pos == std::string::npos) {
return "";
}
return filepath.substr(0, pos);
}
static bool PathExists(const std::string& path) {
#ifdef _WIN32
struct _stat buffer;
return (_stat(path.c_str(), &buffer) == 0);
#else
struct stat buffer;
return (stat(path.c_str(), &buffer) == 0);
#endif // !_WIN32
}
static void MkDir(const std::string& path) {
if (PathExists(path)) return;
int ret = 0;
#ifdef _WIN32
ret = _mkdir(path.c_str());
#else
ret = mkdir(path.c_str(), 0755);
#endif // !_WIN32
if (ret != 0) {
std::string path_error(path);
path_error += " mkdir failed!";
throw std::runtime_error(path_error);
}
}
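// Recursively create every missing directory on the given path (like `mkdir -p`).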
static void MkDirs(const std::string& path) {
if (path.empty()) return;
if (PathExists(path)) return;
MkDirs(DirName(path));
MkDir(path);
}
int main(int argc, char** argv) {
// Parsing command-line
google::ParseCommandLineFlags(&argc, &argv, true);
bool has_model_dir =
!(FLAGS_track_model_dir.empty() && FLAGS_det_model_dir.empty() &&
FLAGS_reid_model_dir.empty());
if (FLAGS_video_file.empty() || (FLAGS_scene.empty() && !has_model_dir)) {
LOG(ERROR) << "Usage: \n"
<< "1. ./main -video_file=/PATH/TO/INPUT/IMAGE/ "
<< "-scene=pedestrian/vehicle/multiclass\n"
<< "2. ./main -video_file=/PATH/TO/INPUT/IMAGE/ "
<< "-track_model_dir=/PATH/TO/MODEL_DIR" << std::endl;
return -1;
}
if (!(FLAGS_run_mode == "paddle" || FLAGS_run_mode == "trt_fp32" ||
FLAGS_run_mode == "trt_fp16" || FLAGS_run_mode == "trt_int8")) {
LOG(ERROR)
<< "run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or 'trt_int8'.";
return -1;
}
transform(FLAGS_device.begin(),
FLAGS_device.end(),
FLAGS_device.begin(),
::toupper);
if (!(FLAGS_device == "CPU" || FLAGS_device == "GPU" ||
FLAGS_device == "XPU")) {
LOG(ERROR) << "device should be 'CPU', 'GPU' or 'XPU'.";
return -1;
}
if (!PathExists(FLAGS_output_dir)) {
MkDirs(FLAGS_output_dir);
}
PaddleDetection::Pipeline pipeline(FLAGS_device,
FLAGS_threshold,
FLAGS_output_dir,
FLAGS_run_mode,
FLAGS_gpu_id,
FLAGS_use_mkldnn,
FLAGS_cpu_threads,
FLAGS_trt_calib_mode,
FLAGS_do_entrance_counting,
FLAGS_save_result,
FLAGS_scene,
FLAGS_tiny_obj,
FLAGS_is_mtmct,
FLAGS_secs_interval,
FLAGS_track_model_dir,
FLAGS_det_model_dir,
FLAGS_reid_model_dir);
pipeline.SetInput(FLAGS_video_file);
if (!FLAGS_video_other_file.empty()) {
pipeline.SetInput(FLAGS_video_other_file);
}
pipeline.Run();
return 0;
}
| PaddleDetection/deploy/pptracking/cpp/src/main.cc/0 | {
"file_path": "PaddleDetection/deploy/pptracking/cpp/src/main.cc",
"repo_id": "PaddleDetection",
"token_count": 2768
} | 52 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/LCFractal/AIC21-MTMC/tree/main/reid/reid-matching/tools
"""
import os
import re
import cv2
import gc
import numpy as np
import pandas as pd
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
__all__ = [
'parse_pt', 'parse_bias', 'get_dire', 'parse_pt_gt',
'compare_dataframes_mtmc', 'get_sim_matrix', 'get_labels', 'getData',
'gen_new_mot'
]
def parse_pt(mot_feature, zones=None):
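    # Group per-line MOT features into a nested dict: {track_id: {frame_id: info}},
    # optionally attaching the spatial zone of each bbox when `zones` is given.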
mot_list = dict()
for line in mot_feature:
fid = int(re.sub('[a-z,A-Z]', "", mot_feature[line]['frame']))
tid = mot_feature[line]['id']
bbox = list(map(lambda x: int(float(x)), mot_feature[line]['bbox']))
if tid not in mot_list:
mot_list[tid] = dict()
out_dict = mot_feature[line]
if zones is not None:
out_dict['zone'] = zones.get_zone(bbox)
else:
out_dict['zone'] = None
mot_list[tid][fid] = out_dict
return mot_list
def gen_new_mot(mot_list):
out_dict = dict()
for tracklet in mot_list:
tracklet = mot_list[tracklet]
for f in tracklet:
out_dict[tracklet[f]['imgname']] = tracklet[f]
return out_dict
def mergesetfeat1_notrk(P, neg_vector, in_feats, in_labels):
out_feats = []
for i in range(in_feats.shape[0]):
camera_id = in_labels[i, 1]
feat = in_feats[i] - neg_vector[camera_id]
feat = P[camera_id].dot(feat)
feat = feat / np.linalg.norm(feat, ord=2)
out_feats.append(feat)
out_feats = np.vstack(out_feats)
return out_feats
def compute_P2(prb_feats, gal_feats, gal_labels, la=3.0):
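    # For each camera id in gal_labels, compute the mean gallery feature
    # (neg_vector) and a regularized inverse of the feature Gram matrix (P),
    # both consumed by the feature-improvement step in run_fic().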
X = gal_feats
neg_vector = {}
u_labels = np.unique(gal_labels[:, 1])
P = {}
for label in u_labels:
curX = gal_feats[gal_labels[:, 1] == label, :]
neg_vector[label] = np.mean(curX, axis=0)
P[label] = np.linalg.inv(
curX.T.dot(curX) + curX.shape[0] * la * np.eye(X.shape[1]))
return P, neg_vector
def parse_bias(cameras_bias):
cid_bias = dict()
for cameras in cameras_bias.keys():
cameras_id = re.sub('[a-z,A-Z]', "", cameras)
cameras_id = int(cameras_id)
bias = cameras_bias[cameras]
cid_bias[cameras_id] = float(bias)
return cid_bias
def get_dire(zone_list, cid):
zs, ze = zone_list[0], zone_list[-1]
return (zs, ze)
def intracam_ignore(st_mask, cid_tids):
count = len(cid_tids)
for i in range(count):
for j in range(count):
if cid_tids[i][0] == cid_tids[j][0]:
st_mask[i, j] = 0.
return st_mask
def mergesetfeat(in_feats, in_labels, in_tracks):
trackset = list(set(list(in_tracks)))
out_feats = []
out_labels = []
for track in trackset:
feat = np.mean(in_feats[in_tracks == track], axis=0)
feat = feat / np.linalg.norm(feat, ord=2)
label = in_labels[in_tracks == track][0]
out_feats.append(feat)
out_labels.append(label)
out_feats = np.vstack(out_feats)
out_labels = np.vstack(out_labels)
return out_feats, out_labels
def mergesetfeat3(X, labels, gX, glabels, beta=0.08, knn=20, lr=0.5):
for i in range(0, X.shape[0]):
if i % 1000 == 0:
print('feat3:%d/%d' % (i, X.shape[0]))
knnX = gX[glabels[:, 1] != labels[i, 1], :]
sim = knnX.dot(X[i, :])
knnX = knnX[sim > 0, :]
sim = sim[sim > 0]
if len(sim) > 0:
idx = np.argsort(-sim)
if len(sim) > 2 * knn:
sim = sim[idx[:2 * knn]]
knnX = knnX[idx[:2 * knn], :]
else:
sim = sim[idx]
knnX = knnX[idx, :]
knn = min(knn, len(sim))
knn_pos_weight = np.exp((sim[:knn] - 1) / beta)
knn_neg_weight = np.ones(len(sim) - knn)
knn_pos_prob = knn_pos_weight / np.sum(knn_pos_weight)
knn_neg_prob = knn_neg_weight / np.sum(knn_neg_weight)
X[i, :] += lr * (knn_pos_prob.dot(knnX[:knn, :]) -
knn_neg_prob.dot(knnX[knn:, :]))
X[i, :] /= np.linalg.norm(X[i, :])
return X
def run_fic(prb_feats, gal_feats, prb_labels, gal_labels, la=3.0):
P, neg_vector = compute_P2(prb_feats, gal_feats, gal_labels, la)
prb_feats_new = mergesetfeat1_notrk(P, neg_vector, prb_feats, prb_labels)
gal_feats_new = mergesetfeat1_notrk(P, neg_vector, gal_feats, gal_labels)
return prb_feats_new, gal_feats_new
def run_fac(prb_feats,
gal_feats,
prb_labels,
gal_labels,
beta=0.08,
knn=20,
lr=0.5,
prb_epoch=2,
gal_epoch=3):
gal_feats_new = gal_feats.copy()
for i in range(prb_epoch):
gal_feats_new = mergesetfeat3(gal_feats_new, gal_labels, gal_feats,
gal_labels, beta, knn, lr)
prb_feats_new = prb_feats.copy()
for i in range(gal_epoch):
prb_feats_new = mergesetfeat3(prb_feats_new, prb_labels, gal_feats_new,
gal_labels, beta, knn, lr)
return prb_feats_new, gal_feats_new
def euclidean_distance(qf, gf):
m = qf.shape[0]
n = gf.shape[0]
dist_mat = 2 - 2 * np.matmul(qf, gf.T)
return dist_mat
def find_topk(a, k, axis=-1, largest=True, sorted=True):
if axis is None:
axis_size = a.size
else:
axis_size = a.shape[axis]
assert 1 <= k <= axis_size
a = np.asanyarray(a)
if largest:
index_array = np.argpartition(a, axis_size - k, axis=axis)
topk_indices = np.take(index_array, -np.arange(k) - 1, axis=axis)
else:
index_array = np.argpartition(a, k - 1, axis=axis)
topk_indices = np.take(index_array, np.arange(k), axis=axis)
topk_values = np.take_along_axis(a, topk_indices, axis=axis)
if sorted:
sorted_indices_in_topk = np.argsort(topk_values, axis=axis)
if largest:
sorted_indices_in_topk = np.flip(sorted_indices_in_topk, axis=axis)
sorted_topk_values = np.take_along_axis(
topk_values, sorted_indices_in_topk, axis=axis)
sorted_topk_indices = np.take_along_axis(
topk_indices, sorted_indices_in_topk, axis=axis)
return sorted_topk_values, sorted_topk_indices
return topk_values, topk_indices
def batch_numpy_topk(qf, gf, k1, N=6000):
m = qf.shape[0]
n = gf.shape[0]
initial_rank = []
for j in range(n // N + 1):
temp_gf = gf[j * N:j * N + N]
temp_qd = []
for i in range(m // N + 1):
temp_qf = qf[i * N:i * N + N]
temp_d = euclidean_distance(temp_qf, temp_gf)
temp_qd.append(temp_d)
temp_qd = np.concatenate(temp_qd, axis=0)
temp_qd = temp_qd / (np.max(temp_qd, axis=0)[0])
temp_qd = temp_qd.T
initial_rank.append(
find_topk(
temp_qd, k=k1, axis=1, largest=False, sorted=True)[1])
del temp_qd
del temp_gf
del temp_qf
del temp_d
initial_rank = np.concatenate(initial_rank, axis=0)
return initial_rank
def batch_euclidean_distance(qf, gf, N=6000):
m = qf.shape[0]
n = gf.shape[0]
dist_mat = []
for j in range(n // N + 1):
temp_gf = gf[j * N:j * N + N]
temp_qd = []
for i in range(m // N + 1):
temp_qf = qf[i * N:i * N + N]
temp_d = euclidean_distance(temp_qf, temp_gf)
temp_qd.append(temp_d)
temp_qd = np.concatenate(temp_qd, axis=0)
temp_qd = temp_qd / (np.max(temp_qd, axis=0)[0])
dist_mat.append(temp_qd.T)
del temp_qd
del temp_gf
del temp_qf
del temp_d
dist_mat = np.concatenate(dist_mat, axis=0)
return dist_mat
def batch_v(feat, R, all_num):
V = np.zeros((all_num, all_num), dtype=np.float32)
m = feat.shape[0]
for i in tqdm(range(m)):
temp_gf = feat[i].reshape(1, -1)
temp_qd = euclidean_distance(temp_gf, feat)
temp_qd = temp_qd / (np.max(temp_qd))
temp_qd = temp_qd.reshape(-1)
temp_qd = temp_qd[R[i].tolist()]
weight = np.exp(-temp_qd)
weight = weight / np.sum(weight)
V[i, R[i]] = weight.astype(np.float32)
return V
def k_reciprocal_neigh(initial_rank, i, k1):
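    # Return the indices among i's top-(k1+1) neighbors that also rank i within
    # their own top-(k1+1) neighbors, i.e. the k-reciprocal neighbor set of i.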
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
return forward_k_neigh_index[fi]
def ReRank2(probFea, galFea, k1=20, k2=6, lambda_value=0.3):
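    # k-reciprocal re-ranking: build sparse membership weights over expanded
    # k-reciprocal neighbor sets, derive a Jaccard distance from them and blend
    # it with the original euclidean distance via lambda_value (batched,
    # memory-friendly variant).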
query_num = probFea.shape[0]
all_num = query_num + galFea.shape[0]
feat = np.concatenate((probFea, galFea), axis=0)
initial_rank = batch_numpy_topk(feat, feat, k1 + 1, N=6000)
del probFea
del galFea
gc.collect() # empty memory
R = []
for i in tqdm(range(all_num)):
# k-reciprocal neighbors
k_reciprocal_index = k_reciprocal_neigh(initial_rank, i, k1)
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_k_reciprocal_index = k_reciprocal_neigh(
initial_rank, candidate, int(np.around(k1 / 2)))
if len(
np.intersect1d(candidate_k_reciprocal_index,
k_reciprocal_index)) > 2. / 3 * len(
candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(
k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
R.append(k_reciprocal_expansion_index)
gc.collect() # empty memory
V = batch_v(feat, R, all_num)
del R
gc.collect() # empty memory
initial_rank = initial_rank[:, :k2]
# Faster version
if k2 != 1:
V_qe = np.zeros_like(V, dtype=np.float16)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i], :], axis=0)
V = V_qe
del V_qe
del initial_rank
gc.collect() # empty memory
invIndex = []
for i in range(all_num):
invIndex.append(np.where(V[:, i] != 0)[0])
jaccard_dist = np.zeros((query_num, all_num), dtype=np.float32)
for i in tqdm(range(query_num)):
temp_min = np.zeros(shape=[1, all_num], dtype=np.float32)
indNonZero = np.where(V[i, :] != 0)[0]
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(
V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
del V
gc.collect() # empty memory
original_dist = batch_euclidean_distance(feat, feat[:query_num, :])
final_dist = jaccard_dist * (1 - lambda_value
) + original_dist * lambda_value
del original_dist
del jaccard_dist
final_dist = final_dist[:query_num, query_num:]
return final_dist
def visual_rerank(prb_feats,
gal_feats,
cid_tids,
use_ff=False,
use_rerank=False):
"""Rerank by visual cures."""
gal_labels = np.array([[0, item[0]] for item in cid_tids])
prb_labels = gal_labels.copy()
if use_ff:
print('current use ff finetuned parameters....')
# Step1-1: fic. finetuned parameters: [la]
prb_feats, gal_feats = run_fic(prb_feats, gal_feats, prb_labels,
gal_labels, 3.0)
        # Step1-2: fac. finetuned parameters: [beta,knn,lr,prb_epoch,gal_epoch]
prb_feats, gal_feats = run_fac(prb_feats, gal_feats, prb_labels,
gal_labels, 0.08, 20, 0.5, 1, 1)
if use_rerank:
print('current use rerank finetuned parameters....')
# Step2: k-reciprocal. finetuned parameters: [k1,k2,lambda_value]
sims = ReRank2(prb_feats, gal_feats, 20, 3, 0.3)
else:
sims = 1.0 - np.dot(prb_feats, gal_feats.T)
# NOTE: sims here is actually dist, the smaller the more similar
return 1.0 - sims
def normalize(nparray, axis=0):
try:
from sklearn import preprocessing
except Exception as e:
raise RuntimeError(
'Unable to use sklearn in MTMCT in PP-Tracking, please install sklearn, for example: `pip install sklearn`'
)
nparray = preprocessing.normalize(nparray, norm='l2', axis=axis)
return nparray
def get_match(cluster_labels):
cluster_dict = dict()
cluster = list()
for i, l in enumerate(cluster_labels):
if l in list(cluster_dict.keys()):
cluster_dict[l].append(i)
else:
cluster_dict[l] = [i]
for idx in cluster_dict:
cluster.append(cluster_dict[idx])
return cluster
def get_cid_tid(cluster_labels, cid_tids):
cluster = list()
for labels in cluster_labels:
cid_tid_list = list()
for label in labels:
cid_tid_list.append(cid_tids[label])
cluster.append(cid_tid_list)
return cluster
def combin_feature(cid_tid_dict, sub_cluster):
for sub_ct in sub_cluster:
if len(sub_ct) < 2: continue
mean_feat = np.array([cid_tid_dict[i]['mean_feat'] for i in sub_ct])
for i in sub_ct:
cid_tid_dict[i]['mean_feat'] = mean_feat.mean(axis=0)
return cid_tid_dict
def combin_cluster(sub_labels, cid_tids):
cluster = list()
for sub_c_to_c in sub_labels:
if len(cluster) < 1:
cluster = sub_labels[sub_c_to_c]
continue
for c_ts in sub_labels[sub_c_to_c]:
is_add = False
for i_c, c_set in enumerate(cluster):
if len(set(c_ts) & set(c_set)) > 0:
new_list = list(set(c_ts) | set(c_set))
cluster[i_c] = new_list
is_add = True
break
if not is_add:
cluster.append(c_ts)
labels = list()
num_tr = 0
for c_ts in cluster:
label_list = list()
for c_t in c_ts:
label_list.append(cid_tids.index(c_t))
num_tr += 1
label_list.sort()
labels.append(label_list)
return labels, cluster
def parse_pt_gt(mot_feature):
img_rects = dict()
for line in mot_feature:
fid = int(re.sub('[a-z,A-Z]', "", mot_feature[line]['frame']))
tid = mot_feature[line]['id']
rect = list(map(lambda x: int(float(x)), mot_feature[line]['bbox']))
if fid not in img_rects:
img_rects[fid] = list()
rect.insert(0, tid)
img_rects[fid].append(rect)
return img_rects
# eval result
def compare_dataframes_mtmc(gts, ts):
try:
import motmetrics as mm
except Exception as e:
raise RuntimeError(
'Unable to use motmetrics in MTMCT in PP-Tracking, please install motmetrics, for example: `pip install motmetrics`, see https://github.com/longcw/py-motmetrics'
)
"""Compute ID-based evaluation metrics for MTMCT
Return:
        summary (pandas.DataFrame): Evaluation summary containing the MOTChallenge metrics plus 'num_frames', 'idfp', 'idfn' and 'idtp'.
"""
gtds = []
tsds = []
gtcams = gts['CameraId'].drop_duplicates().tolist()
tscams = ts['CameraId'].drop_duplicates().tolist()
maxFrameId = 0
for k in sorted(gtcams):
gtd = gts.query('CameraId == %d' % k)
gtd = gtd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
# max FrameId in gtd only
mfid = gtd['FrameId'].max()
gtd['FrameId'] += maxFrameId
gtd = gtd.set_index(['FrameId', 'Id'])
gtds.append(gtd)
if k in tscams:
tsd = ts.query('CameraId == %d' % k)
tsd = tsd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
# max FrameId among both gtd and tsd
mfid = max(mfid, tsd['FrameId'].max())
tsd['FrameId'] += maxFrameId
tsd = tsd.set_index(['FrameId', 'Id'])
tsds.append(tsd)
maxFrameId += mfid
# compute multi-camera tracking evaluation stats
multiCamAcc = mm.utils.compare_to_groundtruth(
pd.concat(gtds), pd.concat(tsds), 'iou')
metrics = list(mm.metrics.motchallenge_metrics)
metrics.extend(['num_frames', 'idfp', 'idfn', 'idtp'])
mh = mm.metrics.create()
summary = mh.compute(multiCamAcc, metrics=metrics, name='MultiCam')
return summary
def get_sim_matrix(cid_tid_dict,
cid_tids,
use_ff=True,
use_rerank=True,
use_st_filter=False):
# Note: camera independent get_sim_matrix function,
# which is different from the one in camera_utils.py.
count = len(cid_tids)
q_arr = np.array(
[cid_tid_dict[cid_tids[i]]['mean_feat'] for i in range(count)])
g_arr = np.array(
[cid_tid_dict[cid_tids[i]]['mean_feat'] for i in range(count)])
q_arr = normalize(q_arr, axis=1)
g_arr = normalize(g_arr, axis=1)
st_mask = np.ones((count, count), dtype=np.float32)
st_mask = intracam_ignore(st_mask, cid_tids)
visual_sim_matrix = visual_rerank(
q_arr, g_arr, cid_tids, use_ff=use_ff, use_rerank=use_rerank)
visual_sim_matrix = visual_sim_matrix.astype('float32')
np.set_printoptions(precision=3)
sim_matrix = visual_sim_matrix * st_mask
np.fill_diagonal(sim_matrix, 0)
return sim_matrix
def get_labels(cid_tid_dict,
cid_tids,
use_ff=True,
use_rerank=True,
use_st_filter=False):
try:
from sklearn.cluster import AgglomerativeClustering
except Exception as e:
raise RuntimeError(
'Unable to use sklearn in MTMCT in PP-Tracking, please install sklearn, for example: `pip install sklearn`'
)
# 1st cluster
sim_matrix = get_sim_matrix(
cid_tid_dict,
cid_tids,
use_ff=use_ff,
use_rerank=use_rerank,
use_st_filter=use_st_filter)
cluster_labels = AgglomerativeClustering(
n_clusters=None,
distance_threshold=0.5,
affinity='precomputed',
linkage='complete').fit_predict(1 - sim_matrix)
labels = get_match(cluster_labels)
sub_cluster = get_cid_tid(labels, cid_tids)
# 2nd cluster
cid_tid_dict_new = combin_feature(cid_tid_dict, sub_cluster)
sim_matrix = get_sim_matrix(
cid_tid_dict_new,
cid_tids,
use_ff=use_ff,
use_rerank=use_rerank,
use_st_filter=use_st_filter)
cluster_labels = AgglomerativeClustering(
n_clusters=None,
distance_threshold=0.9,
affinity='precomputed',
linkage='complete').fit_predict(1 - sim_matrix)
labels = get_match(cluster_labels)
sub_cluster = get_cid_tid(labels, cid_tids)
return labels
def getData(fpath, names=None, sep='\s+|\t+|,'):
""" Get the necessary track data from a file handle.
Args:
fpath (str) : Original path of file reading from.
names (list[str]): List of column names for the data.
sep (str): Allowed separators regular expression string.
Return:
df (pandas.DataFrame): Data frame containing the data loaded from the
stream with optionally assigned column names. No index is set on the data.
"""
try:
df = pd.read_csv(
fpath,
sep=sep,
index_col=None,
skipinitialspace=True,
header=None,
names=names,
engine='python')
return df
except Exception as e:
raise ValueError("Could not read input from %s. Error: %s" %
(fpath, repr(e)))
| PaddleDetection/deploy/pptracking/python/mot/mtmct/utils.py/0 | {
"file_path": "PaddleDetection/deploy/pptracking/python/mot/mtmct/utils.py",
"repo_id": "PaddleDetection",
"token_count": 10456
} | 53 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import yaml
import glob
from functools import reduce
from PIL import Image
import cv2
import math
import numpy as np
import paddle
import sys
# add deploy path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'])))
sys.path.insert(0, parent_path)
from preprocess import preprocess, NormalizeImage, Permute
from keypoint_preprocess import EvalAffine, TopDownEvalAffine, expand_crop
from keypoint_postprocess import HrHRNetPostProcess, HRNetPostProcess
from visualize import visualize_pose
from paddle.inference import Config
from paddle.inference import create_predictor
from utils import argsparser, Timer, get_current_memory_mb
from benchmark_utils import PaddleInferBenchmark
from infer import Detector, get_test_images, print_arguments
# Global dictionary
KEYPOINT_SUPPORT_MODELS = {
'HigherHRNet': 'keypoint_bottomup',
'HRNet': 'keypoint_topdown'
}
class KeyPointDetector(Detector):
"""
Args:
model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml
device (str): Choose the device you want to run, it can be: CPU/GPU/XPU/NPU, default is CPU
run_mode (str): mode of running(paddle/trt_fp32/trt_fp16)
batch_size (int): size of pre batch in inference
trt_min_shape (int): min shape for dynamic shape in trt
trt_max_shape (int): max shape for dynamic shape in trt
trt_opt_shape (int): opt shape for dynamic shape in trt
trt_calib_mode (bool): If the model is produced by TRT offline quantitative
calibration, trt_calib_mode need to set True
cpu_threads (int): cpu threads
enable_mkldnn (bool): whether to open MKLDNN
use_dark(bool): whether to use postprocess in DarkPose
"""
def __init__(self,
model_dir,
device='CPU',
run_mode='paddle',
batch_size=1,
trt_min_shape=1,
trt_max_shape=1280,
trt_opt_shape=640,
trt_calib_mode=False,
cpu_threads=1,
enable_mkldnn=False,
output_dir='output',
threshold=0.5,
use_dark=True,
use_fd_format=False):
super(KeyPointDetector, self).__init__(
model_dir=model_dir,
device=device,
run_mode=run_mode,
batch_size=batch_size,
trt_min_shape=trt_min_shape,
trt_max_shape=trt_max_shape,
trt_opt_shape=trt_opt_shape,
trt_calib_mode=trt_calib_mode,
cpu_threads=cpu_threads,
enable_mkldnn=enable_mkldnn,
output_dir=output_dir,
threshold=threshold,
use_fd_format=use_fd_format)
self.use_dark = use_dark
def set_config(self, model_dir, use_fd_format):
return PredictConfig_KeyPoint(model_dir, use_fd_format=use_fd_format)
def get_person_from_rect(self, image, results):
# crop the person result from image
self.det_times.preprocess_time_s.start()
valid_rects = results['boxes']
rect_images = []
new_rects = []
org_rects = []
for rect in valid_rects:
rect_image, new_rect, org_rect = expand_crop(image, rect)
if rect_image is None or rect_image.size == 0:
continue
rect_images.append(rect_image)
new_rects.append(new_rect)
org_rects.append(org_rect)
self.det_times.preprocess_time_s.end()
return rect_images, new_rects, org_rects
def postprocess(self, inputs, result):
np_heatmap = result['heatmap']
np_masks = result['masks']
# postprocess output of predictor
if KEYPOINT_SUPPORT_MODELS[
self.pred_config.arch] == 'keypoint_bottomup':
results = {}
h, w = inputs['im_shape'][0]
preds = [np_heatmap]
if np_masks is not None:
preds += np_masks
preds += [h, w]
keypoint_postprocess = HrHRNetPostProcess()
kpts, scores = keypoint_postprocess(*preds)
results['keypoint'] = kpts
results['score'] = scores
return results
elif KEYPOINT_SUPPORT_MODELS[
self.pred_config.arch] == 'keypoint_topdown':
results = {}
imshape = inputs['im_shape'][:, ::-1]
center = np.round(imshape / 2.)
scale = imshape / 200.
keypoint_postprocess = HRNetPostProcess(use_dark=self.use_dark)
kpts, scores = keypoint_postprocess(np_heatmap, center, scale)
results['keypoint'] = kpts
results['score'] = scores
return results
else:
raise ValueError("Unsupported arch: {}, expect {}".format(
self.pred_config.arch, KEYPOINT_SUPPORT_MODELS))
def predict(self, repeats=1):
'''
Args:
repeats (int): repeat number for prediction
Returns:
            result (dict): 'heatmap' (np.ndarray): keypoint heatmap predicted by the model;
                            'masks' (list|None): extra outputs [tagmap, heat_k, inds_k]
                            returned by bottom-up models, None for top-down models
'''
# model prediction
np_heatmap, np_masks = None, None
for i in range(repeats):
self.predictor.run()
output_names = self.predictor.get_output_names()
heatmap_tensor = self.predictor.get_output_handle(output_names[0])
np_heatmap = heatmap_tensor.copy_to_cpu()
if self.pred_config.tagmap:
masks_tensor = self.predictor.get_output_handle(output_names[1])
heat_k = self.predictor.get_output_handle(output_names[2])
inds_k = self.predictor.get_output_handle(output_names[3])
np_masks = [
masks_tensor.copy_to_cpu(), heat_k.copy_to_cpu(),
inds_k.copy_to_cpu()
]
result = dict(heatmap=np_heatmap, masks=np_masks)
return result
def predict_image(self,
image_list,
run_benchmark=False,
repeats=1,
visual=True):
results = []
batch_loop_cnt = math.ceil(float(len(image_list)) / self.batch_size)
for i in range(batch_loop_cnt):
start_index = i * self.batch_size
end_index = min((i + 1) * self.batch_size, len(image_list))
batch_image_list = image_list[start_index:end_index]
if run_benchmark:
# preprocess
inputs = self.preprocess(batch_image_list) # warmup
self.det_times.preprocess_time_s.start()
inputs = self.preprocess(batch_image_list)
self.det_times.preprocess_time_s.end()
# model prediction
result_warmup = self.predict(repeats=repeats) # warmup
self.det_times.inference_time_s.start()
result = self.predict(repeats=repeats)
self.det_times.inference_time_s.end(repeats=repeats)
# postprocess
result_warmup = self.postprocess(inputs, result) # warmup
self.det_times.postprocess_time_s.start()
result = self.postprocess(inputs, result)
self.det_times.postprocess_time_s.end()
self.det_times.img_num += len(batch_image_list)
cm, gm, gu = get_current_memory_mb()
self.cpu_mem += cm
self.gpu_mem += gm
self.gpu_util += gu
else:
# preprocess
self.det_times.preprocess_time_s.start()
inputs = self.preprocess(batch_image_list)
self.det_times.preprocess_time_s.end()
# model prediction
self.det_times.inference_time_s.start()
result = self.predict()
self.det_times.inference_time_s.end()
# postprocess
self.det_times.postprocess_time_s.start()
result = self.postprocess(inputs, result)
self.det_times.postprocess_time_s.end()
self.det_times.img_num += len(batch_image_list)
if visual:
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
visualize(
batch_image_list,
result,
visual_thresh=self.threshold,
save_dir=self.output_dir)
results.append(result)
if visual:
print('Test iter {}'.format(i))
results = self.merge_batch_result(results)
return results
def predict_video(self, video_file, camera_id):
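        # Decode frames from the video file (or camera stream when camera_id != -1),
        # run keypoint inference frame by frame and write the visualized result
        # to <output_dir>/<video_name>.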
video_name = 'output.mp4'
if camera_id != -1:
capture = cv2.VideoCapture(camera_id)
else:
capture = cv2.VideoCapture(video_file)
video_name = os.path.split(video_file)[-1]
# Get Video info : resolution, fps, frame count
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(capture.get(cv2.CAP_PROP_FPS))
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
print("fps: %d, frame_count: %d" % (fps, frame_count))
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
out_path = os.path.join(self.output_dir, video_name)
fourcc = cv2.VideoWriter_fourcc(* 'mp4v')
writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
index = 1
while (1):
ret, frame = capture.read()
if not ret:
break
print('detect frame: %d' % (index))
index += 1
results = self.predict_image([frame[:, :, ::-1]], visual=False)
im_results = {}
im_results['keypoint'] = [results['keypoint'], results['score']]
im = visualize_pose(
frame, im_results, visual_thresh=self.threshold, returnimg=True)
writer.write(im)
if camera_id != -1:
cv2.imshow('Mask Detection', im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
writer.release()
def create_inputs(imgs, im_info):
"""generate input for different model type
Args:
imgs (list(numpy)): list of image (np.ndarray)
im_info (list(dict)): list of image info
Returns:
inputs (dict): input of model
"""
inputs = {}
inputs['image'] = np.stack(imgs, axis=0).astype('float32')
im_shape = []
for e in im_info:
im_shape.append(np.array((e['im_shape'])).astype('float32'))
inputs['im_shape'] = np.stack(im_shape, axis=0)
return inputs
class PredictConfig_KeyPoint():
"""set config of preprocess, postprocess and visualize
Args:
model_dir (str): root path of model.yml
"""
def __init__(self, model_dir, use_fd_format=False):
# parsing Yaml config for Preprocess
fd_deploy_file = os.path.join(model_dir, 'inference.yml')
ppdet_deploy_file = os.path.join(model_dir, 'infer_cfg.yml')
if use_fd_format:
if not os.path.exists(fd_deploy_file) and os.path.exists(
ppdet_deploy_file):
raise RuntimeError(
"Non-FD format model detected. Please set `use_fd_format` to False."
)
deploy_file = fd_deploy_file
else:
if not os.path.exists(ppdet_deploy_file) and os.path.exists(
fd_deploy_file):
raise RuntimeError(
"FD format model detected. Please set `use_fd_format` to False."
)
deploy_file = ppdet_deploy_file
with open(deploy_file) as f:
yml_conf = yaml.safe_load(f)
self.check_model(yml_conf)
self.arch = yml_conf['arch']
self.archcls = KEYPOINT_SUPPORT_MODELS[yml_conf['arch']]
self.preprocess_infos = yml_conf['Preprocess']
self.min_subgraph_size = yml_conf['min_subgraph_size']
self.labels = yml_conf['label_list']
self.tagmap = False
self.use_dynamic_shape = yml_conf['use_dynamic_shape']
if 'keypoint_bottomup' == self.archcls:
self.tagmap = True
self.print_config()
def check_model(self, yml_conf):
"""
Raises:
ValueError: loaded model not in supported model type
"""
for support_model in KEYPOINT_SUPPORT_MODELS:
if support_model in yml_conf['arch']:
return True
raise ValueError("Unsupported arch: {}, expect {}".format(yml_conf[
'arch'], KEYPOINT_SUPPORT_MODELS))
def print_config(self):
print('----------- Model Configuration -----------')
print('%s: %s' % ('Model Arch', self.arch))
print('%s: ' % ('Transform Order'))
for op_info in self.preprocess_infos:
print('--%s: %s' % ('transform op', op_info['type']))
print('--------------------------------------------')
def visualize(image_list, results, visual_thresh=0.6, save_dir='output'):
im_results = {}
for i, image_file in enumerate(image_list):
skeletons = results['keypoint']
scores = results['score']
skeleton = skeletons[i:i + 1]
score = scores[i:i + 1]
im_results['keypoint'] = [skeleton, score]
visualize_pose(
image_file,
im_results,
visual_thresh=visual_thresh,
save_dir=save_dir)
def main():
detector = KeyPointDetector(
FLAGS.model_dir,
device=FLAGS.device,
run_mode=FLAGS.run_mode,
batch_size=FLAGS.batch_size,
trt_min_shape=FLAGS.trt_min_shape,
trt_max_shape=FLAGS.trt_max_shape,
trt_opt_shape=FLAGS.trt_opt_shape,
trt_calib_mode=FLAGS.trt_calib_mode,
cpu_threads=FLAGS.cpu_threads,
enable_mkldnn=FLAGS.enable_mkldnn,
threshold=FLAGS.threshold,
output_dir=FLAGS.output_dir,
use_dark=FLAGS.use_dark,
use_fd_format=FLAGS.use_fd_format)
# predict from video file or camera video stream
if FLAGS.video_file is not None or FLAGS.camera_id != -1:
detector.predict_video(FLAGS.video_file, FLAGS.camera_id)
else:
# predict from image
img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10)
if not FLAGS.run_benchmark:
detector.det_times.info(average=True)
else:
mems = {
'cpu_rss_mb': detector.cpu_mem / len(img_list),
'gpu_rss_mb': detector.gpu_mem / len(img_list),
'gpu_util': detector.gpu_util * 100 / len(img_list)
}
perf_info = detector.det_times.report(average=True)
model_dir = FLAGS.model_dir
mode = FLAGS.run_mode
model_info = {
'model_name': model_dir.strip('/').split('/')[-1],
'precision': mode.split('_')[-1]
}
data_info = {
'batch_size': 1,
'shape': "dynamic_shape",
'data_num': perf_info['img_num']
}
det_log = PaddleInferBenchmark(detector.config, model_info,
data_info, perf_info, mems)
det_log('KeyPoint')
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
FLAGS = parser.parse_args()
print_arguments(FLAGS)
FLAGS.device = FLAGS.device.upper()
assert FLAGS.device in ['CPU', 'GPU', 'XPU', 'NPU'
], "device should be CPU, GPU, XPU or NPU"
assert not FLAGS.use_gpu, "use_gpu has been deprecated, please use --device"
main()
| PaddleDetection/deploy/python/keypoint_infer.py/0 | {
"file_path": "PaddleDetection/deploy/python/keypoint_infer.py",
"repo_id": "PaddleDetection",
"token_count": 8388
} | 54 |
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "core/general-server/op/mask_rcnn_r50_fpn_1x_coco.h"
#include "core/predictor/framework/infer.h"
#include "core/predictor/framework/memory.h"
#include "core/predictor/framework/resource.h"
#include "core/util/include/timer.h"
#include <algorithm>
#include <iostream>
#include <memory>
#include <sstream>
namespace baidu {
namespace paddle_serving {
namespace serving {
using baidu::paddle_serving::Timer;
using baidu::paddle_serving::predictor::InferManager;
using baidu::paddle_serving::predictor::MempoolWrapper;
using baidu::paddle_serving::predictor::PaddleGeneralModelConfig;
using baidu::paddle_serving::predictor::general_model::Request;
using baidu::paddle_serving::predictor::general_model::Response;
using baidu::paddle_serving::predictor::general_model::Tensor;
int mask_rcnn_r50_fpn_1x_coco::inference() {
VLOG(2) << "Going to run inference";
const std::vector<std::string> pre_node_names = pre_names();
if (pre_node_names.size() != 1) {
LOG(ERROR) << "This op(" << op_name()
<< ") can only have one predecessor op, but received "
<< pre_node_names.size();
return -1;
}
const std::string pre_name = pre_node_names[0];
const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name);
if (!input_blob) {
LOG(ERROR) << "input_blob is nullptr,error";
return -1;
}
uint64_t log_id = input_blob->GetLogId();
VLOG(2) << "(logid=" << log_id << ") Get precedent op name: " << pre_name;
GeneralBlob *output_blob = mutable_data<GeneralBlob>();
if (!output_blob) {
LOG(ERROR) << "output_blob is nullptr,error";
return -1;
}
output_blob->SetLogId(log_id);
if (!input_blob) {
LOG(ERROR) << "(logid=" << log_id
<< ") Failed mutable depended argument, op:" << pre_name;
return -1;
}
const TensorVector *in = &input_blob->tensor_vector;
TensorVector *out = &output_blob->tensor_vector;
int batch_size = input_blob->_batch_size;
output_blob->_batch_size = batch_size;
VLOG(2) << "(logid=" << log_id << ") infer batch size: " << batch_size;
Timer timeline;
int64_t start = timeline.TimeStampUS();
timeline.Start();
// only support string type
char *total_input_ptr = static_cast<char *>(in->at(0).data.data());
std::string base64str = total_input_ptr;
cv::Mat img = Base2Mat(base64str);
cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
// preprocess
Resize(&img, scale_factor_h, scale_factor_w, im_shape_h, im_shape_w);
Normalize(&img, mean_, scale_, is_scale_);
PadStride(&img, 32);
int input_shape_h = img.rows;
int input_shape_w = img.cols;
std::vector<float> input(1 * 3 * input_shape_h * input_shape_w, 0.0f);
Permute(img, input.data());
// create real_in
TensorVector *real_in = new TensorVector();
if (!real_in) {
LOG(ERROR) << "real_in is nullptr,error";
return -1;
}
int in_num = 0;
size_t databuf_size = 0;
void *databuf_data = NULL;
char *databuf_char = NULL;
// im_shape
std::vector<float> im_shape{static_cast<float>(im_shape_h),
static_cast<float>(im_shape_w)};
databuf_size = 2 * sizeof(float);
databuf_data = MempoolWrapper::instance().malloc(databuf_size);
if (!databuf_data) {
LOG(ERROR) << "Malloc failed, size: " << databuf_size;
return -1;
}
memcpy(databuf_data, im_shape.data(), databuf_size);
databuf_char = reinterpret_cast<char *>(databuf_data);
paddle::PaddleBuf paddleBuf_0(databuf_char, databuf_size);
paddle::PaddleTensor tensor_in_0;
tensor_in_0.name = "im_shape";
tensor_in_0.dtype = paddle::PaddleDType::FLOAT32;
tensor_in_0.shape = {1, 2};
tensor_in_0.lod = in->at(0).lod;
tensor_in_0.data = paddleBuf_0;
real_in->push_back(tensor_in_0);
// image
in_num = 1 * 3 * input_shape_h * input_shape_w;
databuf_size = in_num * sizeof(float);
databuf_data = MempoolWrapper::instance().malloc(databuf_size);
if (!databuf_data) {
LOG(ERROR) << "Malloc failed, size: " << databuf_size;
return -1;
}
memcpy(databuf_data, input.data(), databuf_size);
databuf_char = reinterpret_cast<char *>(databuf_data);
paddle::PaddleBuf paddleBuf_1(databuf_char, databuf_size);
paddle::PaddleTensor tensor_in_1;
tensor_in_1.name = "image";
tensor_in_1.dtype = paddle::PaddleDType::FLOAT32;
tensor_in_1.shape = {1, 3, input_shape_h, input_shape_w};
tensor_in_1.lod = in->at(0).lod;
tensor_in_1.data = paddleBuf_1;
real_in->push_back(tensor_in_1);
// scale_factor
std::vector<float> scale_factor{scale_factor_h, scale_factor_w};
databuf_size = 2 * sizeof(float);
databuf_data = MempoolWrapper::instance().malloc(databuf_size);
if (!databuf_data) {
LOG(ERROR) << "Malloc failed, size: " << databuf_size;
return -1;
}
memcpy(databuf_data, scale_factor.data(), databuf_size);
databuf_char = reinterpret_cast<char *>(databuf_data);
paddle::PaddleBuf paddleBuf_2(databuf_char, databuf_size);
paddle::PaddleTensor tensor_in_2;
tensor_in_2.name = "scale_factor";
tensor_in_2.dtype = paddle::PaddleDType::FLOAT32;
tensor_in_2.shape = {1, 2};
tensor_in_2.lod = in->at(0).lod;
tensor_in_2.data = paddleBuf_2;
real_in->push_back(tensor_in_2);
if (InferManager::instance().infer(engine_name().c_str(), real_in, out,
batch_size)) {
LOG(ERROR) << "(logid=" << log_id
<< ") Failed do infer in fluid model: " << engine_name().c_str();
return -1;
}
int64_t end = timeline.TimeStampUS();
CopyBlobInfo(input_blob, output_blob);
AddBlobInfo(output_blob, start);
AddBlobInfo(output_blob, end);
return 0;
}
void mask_rcnn_r50_fpn_1x_coco::Resize(cv::Mat *img, float &scale_factor_h,
float &scale_factor_w, int &im_shape_h,
int &im_shape_w) {
// keep_ratio
int im_size_max = std::max(img->rows, img->cols);
int im_size_min = std::min(img->rows, img->cols);
int target_size_max = std::max(im_shape_h, im_shape_w);
int target_size_min = std::min(im_shape_h, im_shape_w);
float scale_min =
static_cast<float>(target_size_min) / static_cast<float>(im_size_min);
float scale_max =
static_cast<float>(target_size_max) / static_cast<float>(im_size_max);
float scale_ratio = std::min(scale_min, scale_max);
// scale_factor
scale_factor_h = scale_ratio;
scale_factor_w = scale_ratio;
// Resize
cv::resize(*img, *img, cv::Size(), scale_ratio, scale_ratio, 2);
im_shape_h = img->rows;
im_shape_w = img->cols;
}
void mask_rcnn_r50_fpn_1x_coco::Normalize(cv::Mat *img,
const std::vector<float> &mean,
const std::vector<float> &scale,
const bool is_scale) {
// Normalize
double e = 1.0;
if (is_scale) {
e /= 255.0;
}
(*img).convertTo(*img, CV_32FC3, e);
for (int h = 0; h < img->rows; h++) {
for (int w = 0; w < img->cols; w++) {
img->at<cv::Vec3f>(h, w)[0] =
(img->at<cv::Vec3f>(h, w)[0] - mean[0]) / scale[0];
img->at<cv::Vec3f>(h, w)[1] =
(img->at<cv::Vec3f>(h, w)[1] - mean[1]) / scale[1];
img->at<cv::Vec3f>(h, w)[2] =
(img->at<cv::Vec3f>(h, w)[2] - mean[2]) / scale[2];
}
}
}
void mask_rcnn_r50_fpn_1x_coco::PadStride(cv::Mat *img, int stride_) {
// PadStride
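  // Pad the bottom/right edges with zeros so that both height and width become
  // multiples of stride_ (32 is passed in above).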
if (stride_ <= 0)
return;
int rh = img->rows;
int rw = img->cols;
int nh = (rh / stride_) * stride_ + (rh % stride_ != 0) * stride_;
int nw = (rw / stride_) * stride_ + (rw % stride_ != 0) * stride_;
cv::copyMakeBorder(*img, *img, 0, nh - rh, 0, nw - rw, cv::BORDER_CONSTANT,
cv::Scalar(0));
}
void mask_rcnn_r50_fpn_1x_coco::Permute(const cv::Mat &img, float *data) {
// Permute
int rh = img.rows;
int rw = img.cols;
int rc = img.channels();
for (int i = 0; i < rc; ++i) {
cv::extractChannel(img, cv::Mat(rh, rw, CV_32FC1, data + i * rh * rw), i);
}
}
cv::Mat mask_rcnn_r50_fpn_1x_coco::Base2Mat(std::string &base64_data) {
cv::Mat img;
std::string s_mat;
s_mat = base64Decode(base64_data.data(), base64_data.size());
std::vector<char> base64_img(s_mat.begin(), s_mat.end());
img = cv::imdecode(base64_img, cv::IMREAD_COLOR); // CV_LOAD_IMAGE_COLOR
return img;
}
std::string mask_rcnn_r50_fpn_1x_coco::base64Decode(const char *Data,
int DataByte) {
const char DecodeTable[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
62, // '+'
0, 0, 0,
63, // '/'
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // '0'-'9'
0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // 'A'-'Z'
0, 0, 0, 0, 0, 0, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // 'a'-'z'
};
std::string strDecode;
int nValue;
int i = 0;
while (i < DataByte) {
if (*Data != '\r' && *Data != '\n') {
nValue = DecodeTable[*Data++] << 18;
nValue += DecodeTable[*Data++] << 12;
strDecode += (nValue & 0x00FF0000) >> 16;
if (*Data != '=') {
nValue += DecodeTable[*Data++] << 6;
strDecode += (nValue & 0x0000FF00) >> 8;
if (*Data != '=') {
nValue += DecodeTable[*Data++];
strDecode += nValue & 0x000000FF;
}
}
i += 4;
    } else  // skip carriage return / line feed characters
{
Data++;
i++;
}
}
return strDecode;
}
DEFINE_OP(mask_rcnn_r50_fpn_1x_coco);
} // namespace serving
} // namespace paddle_serving
} // namespace baidu
| PaddleDetection/deploy/serving/cpp/preprocess/mask_rcnn_r50_fpn_1x_coco.cpp/0 | {
"file_path": "PaddleDetection/deploy/serving/cpp/preprocess/mask_rcnn_r50_fpn_1x_coco.cpp",
"repo_id": "PaddleDetection",
"token_count": 4814
} | 55 |
#worker_num: maximum concurrency. When build_dag_each_worker=True, the framework creates worker_num processes, each building its own gRPC server and DAG
##When build_dag_each_worker=False, the framework sets max_workers=worker_num for the gRPC thread pool of the main thread
worker_num: 20
#HTTP port. rpc_port and http_port must not both be empty. When rpc_port is available and http_port is empty, no http_port is generated automatically
http_port: 18093
rpc_port: 9993
dag:
    #op resource type: True for the thread model, False for the process model
    is_thread_op: False
op:
    #op name; must match the name argument used to initialize TIPCExampleService in web_service
    ppdet:
        #concurrency; thread-level concurrency when is_thread_op=True, otherwise process-level concurrency
        concurrency: 1
        #when the op config has no server_endpoints, the local service config is read from local_service_conf
        local_service_conf:
            #model path
            model_config: "./serving_server"
            #compute hardware type: if empty, it is decided by devices (CPU/GPU); 0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu
            device_type:
            #compute device IDs: CPU inference when devices is "" or omitted; GPU inference when devices is "0" or "0,1,2", listing the GPU cards to use
            devices: "0" # "0,1"
            #client type: brpc, grpc or local_predictor. local_predictor does not start a Serving service and predicts in-process
            client_type: local_predictor
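#Example usage (sketch; script names follow the serving example and may differ in your setup):
#    python3 web_service.py &                           # start the pipeline service with this config
#    python3 pipeline_http_client.py --image demo.jpg   # hypothetical client hitting http_port 18093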
| PaddleDetection/deploy/serving/python/config.yml/0 | {
"file_path": "PaddleDetection/deploy/serving/python/config.yml",
"repo_id": "PaddleDetection",
"token_count": 856
} | 56 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/stack_allocator.h>
#ifdef __cplusplus
extern "C" {
#endif
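// Minimal platform hooks required by the TVM C runtime: abort/exit, logging to
// stdout, and memory allocate/free stubs that simply report
// kTvmErrorFunctionCallNotImplemented.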
void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t error_code) {
printf("TVMPlatformAbort: %d\n", error_code);
printf("EXITTHESIM\n");
exit(-1);
}
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev,
void **out_ptr) {
return kTvmErrorFunctionCallNotImplemented;
}
tvm_crt_error_t TVMPlatformMemoryFree(void *ptr, DLDevice dev) {
return kTvmErrorFunctionCallNotImplemented;
}
void TVMLogf(const char *msg, ...) {
va_list args;
va_start(args, msg);
vfprintf(stdout, msg, args);
va_end(args);
}
TVM_DLL int TVMFuncRegisterGlobal(const char *name, TVMFunctionHandle f,
int override) {
return 0;
}
#ifdef __cplusplus
}
#endif
| PaddleDetection/deploy/third_engine/demo_avh/include/tvm_runtime.h/0 | {
"file_path": "PaddleDetection/deploy/third_engine/demo_avh/include/tvm_runtime.h",
"repo_id": "PaddleDetection",
"token_count": 629
} | 57 |
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// reference from https://github.com/RangiLyu/nanodet/tree/main/demo_mnn
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "keypoint_detector.h"
#include "picodet_mnn.h"
#define __SAVE_RESULT__ // if defined, save drawn results to ../results; otherwise
// show them in a window
using namespace PaddleDetection;
struct object_rect {
int x;
int y;
int width;
int height;
};
int resize_uniform(cv::Mat& src,
cv::Mat& dst,
cv::Size dst_size,
object_rect& effect_area) {
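  // Letterbox resize: scale `src` into `dst_size` while keeping its aspect
  // ratio, pad the remaining area with zeros and record the region actually
  // covered by the image in `effect_area`.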
int w = src.cols;
int h = src.rows;
int dst_w = dst_size.width;
int dst_h = dst_size.height;
dst = cv::Mat(cv::Size(dst_w, dst_h), CV_8UC3, cv::Scalar(0));
float ratio_src = w * 1.0 / h;
float ratio_dst = dst_w * 1.0 / dst_h;
int tmp_w = 0;
int tmp_h = 0;
if (ratio_src > ratio_dst) {
tmp_w = dst_w;
tmp_h = floor((dst_w * 1.0 / w) * h);
} else if (ratio_src < ratio_dst) {
tmp_h = dst_h;
tmp_w = floor((dst_h * 1.0 / h) * w);
} else {
cv::resize(src, dst, dst_size);
effect_area.x = 0;
effect_area.y = 0;
effect_area.width = dst_w;
effect_area.height = dst_h;
return 0;
}
cv::Mat tmp;
cv::resize(src, tmp, cv::Size(tmp_w, tmp_h));
if (tmp_w != dst_w) {
int index_w = floor((dst_w - tmp_w) / 2.0);
for (int i = 0; i < dst_h; i++) {
memcpy(dst.data + i * dst_w * 3 + index_w * 3,
tmp.data + i * tmp_w * 3,
tmp_w * 3);
}
effect_area.x = index_w;
effect_area.y = 0;
effect_area.width = tmp_w;
effect_area.height = tmp_h;
} else if (tmp_h != dst_h) {
int index_h = floor((dst_h - tmp_h) / 2.0);
memcpy(dst.data + index_h * dst_w * 3, tmp.data, tmp_w * tmp_h * 3);
effect_area.x = 0;
effect_area.y = index_h;
effect_area.width = tmp_w;
effect_area.height = tmp_h;
} else {
printf("error\n");
}
return 0;
}
const int color_list[80][3] = {
{216, 82, 24}, {236, 176, 31}, {125, 46, 141}, {118, 171, 47},
{76, 189, 237}, {238, 19, 46}, {76, 76, 76}, {153, 153, 153},
{255, 0, 0}, {255, 127, 0}, {190, 190, 0}, {0, 255, 0},
{0, 0, 255}, {170, 0, 255}, {84, 84, 0}, {84, 170, 0},
{84, 255, 0}, {170, 84, 0}, {170, 170, 0}, {170, 255, 0},
{255, 84, 0}, {255, 170, 0}, {255, 255, 0}, {0, 84, 127},
{0, 170, 127}, {0, 255, 127}, {84, 0, 127}, {84, 84, 127},
{84, 170, 127}, {84, 255, 127}, {170, 0, 127}, {170, 84, 127},
{170, 170, 127}, {170, 255, 127}, {255, 0, 127}, {255, 84, 127},
{255, 170, 127}, {255, 255, 127}, {0, 84, 255}, {0, 170, 255},
{0, 255, 255}, {84, 0, 255}, {84, 84, 255}, {84, 170, 255},
{84, 255, 255}, {170, 0, 255}, {170, 84, 255}, {170, 170, 255},
{170, 255, 255}, {255, 0, 255}, {255, 84, 255}, {255, 170, 255},
{42, 0, 0}, {84, 0, 0}, {127, 0, 0}, {170, 0, 0},
{212, 0, 0}, {255, 0, 0}, {0, 42, 0}, {0, 84, 0},
{0, 127, 0}, {0, 170, 0}, {0, 212, 0}, {0, 255, 0},
{0, 0, 42}, {0, 0, 84}, {0, 0, 127}, {0, 0, 170},
{0, 0, 212}, {0, 0, 255}, {0, 0, 0}, {36, 36, 36},
{72, 72, 72}, {109, 109, 109}, {145, 145, 145}, {182, 182, 182},
{218, 218, 218}, {0, 113, 188}, {80, 182, 188}, {127, 127, 0},
};
void draw_bboxes(const cv::Mat& bgr,
const std::vector<BoxInfo>& bboxes,
object_rect effect_roi,
std::string save_path = "None") {
static const char* class_names[] = {
"person", "bicycle", "car",
"motorcycle", "airplane", "bus",
"train", "truck", "boat",
"traffic light", "fire hydrant", "stop sign",
"parking meter", "bench", "bird",
"cat", "dog", "horse",
"sheep", "cow", "elephant",
"bear", "zebra", "giraffe",
"backpack", "umbrella", "handbag",
"tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball",
"kite", "baseball bat", "baseball glove",
"skateboard", "surfboard", "tennis racket",
"bottle", "wine glass", "cup",
"fork", "knife", "spoon",
"bowl", "banana", "apple",
"sandwich", "orange", "broccoli",
"carrot", "hot dog", "pizza",
"donut", "cake", "chair",
"couch", "potted plant", "bed",
"dining table", "toilet", "tv",
"laptop", "mouse", "remote",
"keyboard", "cell phone", "microwave",
"oven", "toaster", "sink",
"refrigerator", "book", "clock",
"vase", "scissors", "teddy bear",
"hair drier", "toothbrush"};
cv::Mat image = bgr.clone();
int src_w = image.cols;
int src_h = image.rows;
int dst_w = effect_roi.width;
int dst_h = effect_roi.height;
float width_ratio = (float)src_w / (float)dst_w;
float height_ratio = (float)src_h / (float)dst_h;
for (size_t i = 0; i < bboxes.size(); i++) {
const BoxInfo& bbox = bboxes[i];
cv::Scalar color = cv::Scalar(color_list[bbox.label][0],
color_list[bbox.label][1],
color_list[bbox.label][2]);
cv::rectangle(image,
cv::Rect(cv::Point((bbox.x1 - effect_roi.x) * width_ratio,
(bbox.y1 - effect_roi.y) * height_ratio),
cv::Point((bbox.x2 - effect_roi.x) * width_ratio,
(bbox.y2 - effect_roi.y) * height_ratio)),
color);
char text[256];
sprintf(text, "%s %.1f%%", class_names[bbox.label], bbox.score * 100);
int baseLine = 0;
cv::Size label_size =
cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine);
int x = (bbox.x1 - effect_roi.x) * width_ratio;
int y =
(bbox.y1 - effect_roi.y) * height_ratio - label_size.height - baseLine;
if (y < 0) y = 0;
if (x + label_size.width > image.cols) x = image.cols - label_size.width;
cv::rectangle(
image,
cv::Rect(cv::Point(x, y),
cv::Size(label_size.width, label_size.height + baseLine)),
color,
-1);
cv::putText(image,
text,
cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX,
0.4,
cv::Scalar(255, 255, 255));
}
if (save_path == "None") {
cv::imshow("image", image);
} else {
cv::imwrite(save_path, image);
std::cout << save_path << std::endl;
}
}
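// Map boxes detected on the letterboxed (resized + padded) image back to the
// coordinate system of the original image, using the effective ROI recorded
// by resize_uniform().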
std::vector<BoxInfo> coordsback(const cv::Mat image,
const object_rect effect_roi,
const std::vector<BoxInfo>& bboxes) {
int src_w = image.cols;
int src_h = image.rows;
int dst_w = effect_roi.width;
int dst_h = effect_roi.height;
float width_ratio = (float)src_w / (float)dst_w;
float height_ratio = (float)src_h / (float)dst_h;
std::vector<BoxInfo> bboxes_oimg;
for (int i = 0; i < bboxes.size(); i++) {
auto bbox = bboxes[i];
bbox.x1 = (bbox.x1 - effect_roi.x) * width_ratio;
bbox.y1 = (bbox.y1 - effect_roi.y) * height_ratio;
bbox.x2 = (bbox.x2 - effect_roi.x) * width_ratio;
bbox.y2 = (bbox.y2 - effect_roi.y) * height_ratio;
bboxes_oimg.emplace_back(bbox);
}
return bboxes_oimg;
}
void image_infer_kpts(KeyPointDetector* kpts_detector,
cv::Mat image,
const object_rect effect_roi,
const std::vector<BoxInfo>& results,
std::string img_name = "kpts_vis",
bool save_img = true) {
std::vector<cv::Mat> cropimgs;
std::vector<std::vector<float>> center_bs;
std::vector<std::vector<float>> scale_bs;
std::vector<KeyPointResult> kpts_results;
auto results_oimg = coordsback(image, effect_roi, results);
for (int i = 0; i < results_oimg.size(); i++) {
auto rect = results_oimg[i];
if (rect.label == 0) {
cv::Mat cropimg;
std::vector<float> center, scale;
std::vector<int> area = {static_cast<int>(rect.x1),
static_cast<int>(rect.y1),
static_cast<int>(rect.x2),
static_cast<int>(rect.y2)};
CropImg(image, cropimg, area, center, scale);
// cv::imwrite("./test_crop_"+std::to_string(i)+".jpg", cropimg);
cropimgs.emplace_back(cropimg);
center_bs.emplace_back(center);
scale_bs.emplace_back(scale);
}
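    // Run the keypoint model as soon as a crop has been collected (effective
    // batch size 1); the second clause flushes any remaining crops after the
    // last detection has been processed.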
if (cropimgs.size() == 1 ||
(cropimgs.size() > 0 && i == results_oimg.size() - 1)) {
kpts_detector->Predict(cropimgs, center_bs, scale_bs, &kpts_results);
cropimgs.clear();
center_bs.clear();
scale_bs.clear();
}
}
std::vector<int> compression_params;
compression_params.push_back(cv::IMWRITE_JPEG_QUALITY);
compression_params.push_back(95);
std::string kpts_savepath =
"keypoint_" + img_name.substr(img_name.find_last_of('/') + 1);
cv::Mat kpts_vis_img =
VisualizeKptsResult(image, kpts_results, {0, 255, 0}, 0.3);
if (save_img) {
cv::imwrite(kpts_savepath, kpts_vis_img, compression_params);
printf("Visualized output saved as %s\n", kpts_savepath.c_str());
} else {
cv::imshow("image", kpts_vis_img);
}
}
int image_demo(PicoDet& detector,
KeyPointDetector* kpts_detector,
const char* imagepath) {
std::vector<cv::String> filenames;
cv::glob(imagepath, filenames, false);
for (auto img_name : filenames) {
cv::Mat image = cv::imread(img_name);
if (image.empty()) {
fprintf(stderr, "cv::imread %s failed\n", img_name.c_str());
return -1;
}
object_rect effect_roi;
cv::Mat resized_img;
resize_uniform(image, resized_img, cv::Size(320, 320), effect_roi);
std::vector<BoxInfo> results;
detector.detect(resized_img, results);
if (kpts_detector) {
image_infer_kpts(kpts_detector, image, effect_roi, results, img_name);
}
}
return 0;
}
int webcam_demo(PicoDet& detector,
KeyPointDetector* kpts_detector,
int cam_id) {
cv::Mat image;
cv::VideoCapture cap(cam_id);
while (true) {
cap >> image;
object_rect effect_roi;
cv::Mat resized_img;
resize_uniform(image, resized_img, cv::Size(320, 320), effect_roi);
std::vector<BoxInfo> results;
detector.detect(resized_img, results);
if (kpts_detector) {
image_infer_kpts(kpts_detector, image, effect_roi, results, "", false);
}
}
return 0;
}
int video_demo(PicoDet& detector,
KeyPointDetector* kpts_detector,
const char* path) {
cv::Mat image;
cv::VideoCapture cap(path);
while (true) {
cap >> image;
object_rect effect_roi;
cv::Mat resized_img;
resize_uniform(image, resized_img, cv::Size(320, 320), effect_roi);
std::vector<BoxInfo> results;
detector.detect(resized_img, results);
if (kpts_detector) {
image_infer_kpts(kpts_detector, image, effect_roi, results, "", false);
}
}
return 0;
}
int benchmark(KeyPointDetector* kpts_detector) {
int loop_num = 100;
int warm_up = 8;
double time_min = DBL_MAX;
double time_max = -DBL_MAX;
double time_avg = 0;
cv::Mat image(256, 192, CV_8UC3, cv::Scalar(1, 1, 1));
std::vector<float> center = {128, 96};
std::vector<float> scale = {256, 192};
std::vector<cv::Mat> cropimgs = {image};
std::vector<std::vector<float>> center_bs = {center};
std::vector<std::vector<float>> scale_bs = {scale};
std::vector<KeyPointResult> kpts_results;
for (int i = 0; i < warm_up + loop_num; i++) {
auto start = std::chrono::steady_clock::now();
std::vector<BoxInfo> results;
kpts_detector->Predict(cropimgs, center_bs, scale_bs, &kpts_results);
auto end = std::chrono::steady_clock::now();
std::chrono::duration<double> elapsed = end - start;
double time = elapsed.count();
if (i >= warm_up) {
time_min = (std::min)(time_min, time);
time_max = (std::max)(time_max, time);
time_avg += time;
}
}
time_avg /= loop_num;
fprintf(stderr,
"%20s min = %7.2f max = %7.2f avg = %7.2f\n",
"tinypose",
time_min,
time_max,
time_avg);
return 0;
}
int main(int argc, char** argv) {
if (argc != 3) {
fprintf(stderr,
"usage: %s [mode] [path]. \n For webcam mode=0, path is cam id; \n "
"For image demo, mode=1, path=xxx/xxx/*.jpg; \n For video, mode=2; "
"\n For benchmark, mode=3 path=0.\n",
argv[0]);
return -1;
}
PicoDet detector =
PicoDet("../weight/picodet_m_416.mnn", 416, 416, 4, 0.45, 0.3);
KeyPointDetector* kpts_detector =
new KeyPointDetector("../weight/tinypose256.mnn", 4, 256, 192);
int mode = atoi(argv[1]);
switch (mode) {
case 0: {
int cam_id = atoi(argv[2]);
webcam_demo(detector, kpts_detector, cam_id);
break;
}
case 1: {
const char* images = argv[2];
image_demo(detector, kpts_detector, images);
break;
}
case 2: {
const char* path = argv[2];
video_demo(detector, kpts_detector, path);
break;
}
case 3: {
benchmark(kpts_detector);
break;
}
default: {
fprintf(stderr,
"usage: %s [mode] [path]. \n For webcam mode=0, path is cam id; "
"\n For image demo, mode=1, path=xxx/xxx/*.jpg; \n For video, "
"mode=2; \n For benchmark, mode=3 path=0.\n",
argv[0]);
break;
}
}
delete kpts_detector;
kpts_detector = nullptr;
}
| PaddleDetection/deploy/third_engine/demo_mnn_kpts/main.cpp/0 | {
"file_path": "PaddleDetection/deploy/third_engine/demo_mnn_kpts/main.cpp",
"repo_id": "PaddleDetection",
"token_count": 7319
} | 58 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import argparse
import onnxruntime as ort
from pathlib import Path
from tqdm import tqdm
class PicoDet():
def __init__(self,
model_pb_path,
label_path,
prob_threshold=0.4,
iou_threshold=0.3):
self.classes = list(
map(lambda x: x.strip(), open(label_path, 'r').readlines()))
self.num_classes = len(self.classes)
self.prob_threshold = prob_threshold
self.iou_threshold = iou_threshold
self.mean = np.array(
[103.53, 116.28, 123.675], dtype=np.float32).reshape(1, 1, 3)
self.std = np.array(
[57.375, 57.12, 58.395], dtype=np.float32).reshape(1, 1, 3)
so = ort.SessionOptions()
so.log_severity_level = 3
self.net = ort.InferenceSession(model_pb_path, so)
inputs_name = [a.name for a in self.net.get_inputs()]
inputs_shape = {
k: v.shape
for k, v in zip(inputs_name, self.net.get_inputs())
}
self.input_shape = inputs_shape['image'][2:]
def _normalize(self, img):
img = img.astype(np.float32)
img = (img / 255.0 - self.mean / 255.0) / (self.std / 255.0)
return img
def resize_image(self, srcimg, keep_ratio=False):
top, left, newh, neww = 0, 0, self.input_shape[0], self.input_shape[1]
origin_shape = srcimg.shape[:2]
im_scale_y = newh / float(origin_shape[0])
im_scale_x = neww / float(origin_shape[1])
img_shape = np.array([
[float(self.input_shape[0]), float(self.input_shape[1])]
]).astype('float32')
scale_factor = np.array([[im_scale_y, im_scale_x]]).astype('float32')
if keep_ratio and srcimg.shape[0] != srcimg.shape[1]:
hw_scale = srcimg.shape[0] / srcimg.shape[1]
if hw_scale > 1:
newh, neww = self.input_shape[0], int(self.input_shape[1] /
hw_scale)
img = cv2.resize(
srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
left = int((self.input_shape[1] - neww) * 0.5)
img = cv2.copyMakeBorder(
img,
0,
0,
left,
self.input_shape[1] - neww - left,
cv2.BORDER_CONSTANT,
value=0) # add border
else:
newh, neww = int(self.input_shape[0] *
hw_scale), self.input_shape[1]
img = cv2.resize(
srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
top = int((self.input_shape[0] - newh) * 0.5)
img = cv2.copyMakeBorder(
img,
top,
self.input_shape[0] - newh - top,
0,
0,
cv2.BORDER_CONSTANT,
value=0)
else:
img = cv2.resize(
srcimg, self.input_shape, interpolation=cv2.INTER_LINEAR)
return img, img_shape, scale_factor
def get_color_map_list(self, num_classes):
color_map = num_classes * [0, 0, 0]
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
return color_map
def detect(self, srcimg):
img, im_shape, scale_factor = self.resize_image(srcimg)
img = self._normalize(img)
blob = np.expand_dims(np.transpose(img, (2, 0, 1)), axis=0)
inputs_dict = {
'im_shape': im_shape,
'image': blob,
'scale_factor': scale_factor
}
inputs_name = [a.name for a in self.net.get_inputs()]
net_inputs = {k: inputs_dict[k] for k in inputs_name}
outs = self.net.run(None, net_inputs)
outs = np.array(outs[0])
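        # each detection row is laid out as [class_id, score, xmin, ymin, xmax, ymax]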
        expect_boxes = (outs[:, 1] > self.prob_threshold) & (outs[:, 0] > -1)
np_boxes = outs[expect_boxes, :]
color_list = self.get_color_map_list(self.num_classes)
clsid2color = {}
for i in range(np_boxes.shape[0]):
classid, conf = int(np_boxes[i, 0]), np_boxes[i, 1]
xmin, ymin, xmax, ymax = int(np_boxes[i, 2]), int(np_boxes[
i, 3]), int(np_boxes[i, 4]), int(np_boxes[i, 5])
if classid not in clsid2color:
clsid2color[classid] = color_list[classid]
color = tuple(clsid2color[classid])
cv2.rectangle(
srcimg, (xmin, ymin), (xmax, ymax), color, thickness=2)
print(self.classes[classid] + ': ' + str(round(conf, 3)))
cv2.putText(
srcimg,
self.classes[classid] + ':' + str(round(conf, 3)), (xmin,
ymin - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.8, (0, 255, 0),
thickness=2)
return srcimg
def detect_folder(self, img_fold, result_path):
img_fold = Path(img_fold)
result_path = Path(result_path)
result_path.mkdir(parents=True, exist_ok=True)
img_name_list = filter(
lambda x: str(x).endswith(".png") or str(x).endswith(".jpg"),
img_fold.iterdir(), )
img_name_list = list(img_name_list)
print(f"find {len(img_name_list)} images")
for img_path in tqdm(img_name_list):
img = cv2.imread(str(img_path), 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            srcimg = self.detect(img)
save_path = str(result_path / img_path.name.replace(".png", ".jpg"))
cv2.imwrite(save_path, srcimg)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--modelpath',
type=str,
default='onnx_file/picodet_s_320_lcnet_postprocessed.onnx',
help="onnx filepath")
parser.add_argument(
'--classfile',
type=str,
default='coco_label.txt',
help="classname filepath")
parser.add_argument(
'--confThreshold', default=0.5, type=float, help='class confidence')
parser.add_argument(
'--nmsThreshold', default=0.6, type=float, help='nms iou thresh')
parser.add_argument(
"--img_fold", dest="img_fold", type=str, default="./imgs")
parser.add_argument(
"--result_fold", dest="result_fold", type=str, default="results")
args = parser.parse_args()
net = PicoDet(
args.modelpath,
args.classfile,
prob_threshold=args.confThreshold,
iou_threshold=args.nmsThreshold)
net.detect_folder(args.img_fold, args.result_fold)
print(
f'infer results in ./deploy/third_engine/demo_onnxruntime/{args.result_fold}'
)
| PaddleDetection/deploy/third_engine/demo_onnxruntime/infer_demo.py/0 | {
"file_path": "PaddleDetection/deploy/third_engine/demo_onnxruntime/infer_demo.py",
"repo_id": "PaddleDetection",
"token_count": 4119
} | 59 |
Simplified Chinese | [English](./pphuman_mtmct_en.md)
# Customized Multi-Target Multi-Camera Tracking (MTMCT) Development
## Data Preparation
### Data Format
Cross-camera tracking is implemented with person ReID technology. The model is trained as a multi-class classification model, and at inference time the feature in front of the classification softmax head is used as the retrieval feature vector.
Its data format is therefore the same as that of a multi-class classification task. Each pedestrian is assigned a unique id: different pedestrians get different ids, and the same pedestrian keeps the same id across different images.
For example, if images 0001.jpg and 0003.jpg show the same person while 0002.jpg and 0004.jpg show other, different pedestrians, the annotated ids are:
```
0001.jpg 00001
0002.jpg 00002
0003.jpg 00001
0004.jpg 00003
...
```
And so on.
### Data Annotation
Once the meaning of the `annotation` format above is clear, you can start annotating data. The essence is: create one annotation entry per single-person image, holding the id assigned to that pedestrian.
For example:
For an original image,
1) Use detection boxes to annotate the position of every person in the image.
2) Each detection box (corresponding to one person) carries an id attribute of type int. For example, the person in 0001.jpg above corresponds to id: 1.
After annotation, crop every person into a single-person image according to its detection box, and associate each cropped image with its id annotation. You can also crop the single-person images first and annotate them afterwards; the result is the same. A minimal cropping sketch is given below.
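The following is only an illustrative sketch of the cropping step, not part of the official PP-Human tooling; the `(x1, y1, x2, y2, person_id)` tuple format and the output folder are assumptions you should adapt to your own annotation files.

```python
import os
import cv2

def crop_persons(img_path, boxes, out_dir="REID/data"):
    """Crop each annotated person into a single-person image.

    boxes: list of (x1, y1, x2, y2, person_id) tuples for this image (assumed format).
    Returns a list of (crop_filename, person_id) pairs for the training list.
    """
    os.makedirs(out_dir, exist_ok=True)
    img = cv2.imread(img_path)
    records = []
    for i, (x1, y1, x2, y2, pid) in enumerate(boxes):
        crop = img[int(y1):int(y2), int(x1):int(x2)]
        name = "{}_{:02d}.jpg".format(
            os.path.splitext(os.path.basename(img_path))[0], i)
        cv2.imwrite(os.path.join(out_dir, name), crop)
        records.append((name, pid))
    return records
```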
## Model Training
Once data annotation is finished, the data can be used to train the model and complete the optimization of your customized model.
Two main steps are required: 1) organize the images and annotations into the training format; 2) modify the configuration file and start training.
### Training Data Format
The training data consists of the training images and a training list bounding_box_train.txt, whose exact location is specified in the training configuration. An example layout:
```
REID/
|-- data                        training image folder
|   |-- 00001.jpg
|   |-- 00002.jpg
|   `-- 0000x.jpg
`-- bounding_box_train.txt      training data list
```
The bounding_box_train.txt file contains the names of all training images (file paths relative to the root path) plus one id annotation value.
Each line represents one person's image and its id annotation, in the following format:
```
0001.jpg 00001
0002.jpg 00002
0003.jpg 00001
0004.jpg 00003
```
Note: the image name and the annotation value are separated by a Tab [\t] character. This format must be followed exactly, otherwise parsing will fail. A small sketch for writing such a list is shown below.
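A minimal sketch (an illustrative assumption about how you keep your `(image_name, person_id)` records, not official tooling) for writing the tab-separated training list:

```python
def write_train_list(records, list_path="REID/bounding_box_train.txt"):
    """records: iterable of (image_name, person_id) pairs collected during cropping."""
    with open(list_path, "w") as f:
        for name, pid in records:
            # image name and id are separated by a single Tab character
            f.write("{}\t{:05d}\n".format(name, int(pid)))
```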
### Modify the Configuration and Start Training
First run the following command to download the training code (for environment issues please refer to [Install_PaddleClas](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/en/installation/install_paddleclas_en.md)):
```shell
git clone https://github.com/PaddlePaddle/PaddleClas
```
The configuration items that need to be modified in the configuration file [softmax_triplet_with_center.yaml](https://github.com/PaddlePaddle/PaddleClas/blob/develop/ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml) are as follows:
```
Head:
name: "FC"
embedding_size: *feat_dim
  class_num: &class_num 751 # total number of pedestrian ids
DataLoader:
Train:
dataset:
name: "Market1501"
        image_root: "./dataset/" # root path of the training images
        cls_label_path: "bounding_box_train" # training file list
Eval:
Query:
dataset:
name: "Market1501"
        image_root: "./dataset/" # root path of the evaluation images
        cls_label_path: "query" # evaluation file list
```
Note:
1. The image_root path concatenated with the relative image paths in bounding_box_train.txt must form the full paths where the images are stored.
Then run the following commands to start training.
```
# multi-GPU training
export CUDA_VISIBLE_DEVICES=0,1,2,3
python3 -m paddle.distributed.launch \
--gpus="0,1,2,3" \
tools/train.py \
-c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml
# single-GPU training
python3 tools/train.py \
-c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml
```
After training, you can run the following commands to evaluate performance:
```
# multi-GPU evaluation
export CUDA_VISIBLE_DEVICES=0,1,2,3
python3 -m paddle.distributed.launch \
--gpus="0,1,2,3" \
tools/eval.py \
-c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \
-o Global.pretrained_model=./output/strong_baseline/best_model
# single-GPU evaluation
python3 tools/eval.py \
-c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \
-o Global.pretrained_model=./output/strong_baseline/best_model
```
### Model Export
Use the following command to export the trained model as an inference deployment model.
```
python3 tools/export_model.py \
-c ./ppcls/configs/reid/strong_baseline/softmax_triplet_with_center.yaml \
-o Global.pretrained_model=./output/strong_baseline/best_model \
-o Global.save_inference_dir=deploy/models/strong_baseline_inference
```
After exporting the model, download the [infer_cfg.yml](https://bj.bcebos.com/v1/paddledet/models/pipeline/REID/infer_cfg.yml) file into the newly exported model folder 'strong_baseline_inference'.
To use it, modify the model path `model_dir` and enable the feature `enable` in the PP-Human configuration file infer_cfg_pphuman.yml.
```
REID:
model_dir: [YOUR_DEPLOY_MODEL_DIR]/strong_baseline_inference/
enable: True
```
Then it is ready to use. This completes the model development.
| PaddleDetection/docs/advanced_tutorials/customization/pphuman_mtmct.md/0 | {
"file_path": "PaddleDetection/docs/advanced_tutorials/customization/pphuman_mtmct.md",
"repo_id": "PaddleDetection",
"token_count": 3061
} | 60 |
English | [简体中文](GETTING_STARTED_cn.md)
# Getting Started
## Installation
For setting up the running environment, please refer to [installation
instructions](INSTALL_cn.md).
## Data preparation
- Please refer to [PrepareDetDataSet](./data/PrepareDetDataSet_en.md) for data preparation
- Please set the data path for data configuration file in ```configs/datasets```
## Training & Evaluation & Inference
PaddleDetection provides scripts for training, evaluation and inference with various features according to different configurations. For more distributed training details, see [DistributedTraining](./DistributedTraining_en.md).
```bash
# training on single-GPU
export CUDA_VISIBLE_DEVICES=0
python tools/train.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml
# training on multi-GPU
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml
# training on multi-machines and multi-GPUs
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
fleetrun --ips="10.127.6.17,10.127.5.142,10.127.45.13,10.127.44.151" --selected_gpu 0,1,2,3,4,5,6,7 tools/train.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml
# GPU evaluation
export CUDA_VISIBLE_DEVICES=0
python tools/eval.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_fpn_1x_coco.pdparams
# Inference
python tools/infer.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml --infer_img=demo/000000570688.jpg -o weights=https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_fpn_1x_coco.pdparams
```
### Other argument list
list below can be viewed by `--help`
| FLAG | script supported | description | default | remark |
| :----------------------: | :------------: | :---------------: | :--------------: | :-----------------: |
| -c | ALL | Select config file | None | **required**, such as `-c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml` |
| -o | ALL | Set parameters in the config file | None | `-o` has higher priority than the file configured by `-c`. Such as `-o use_gpu=False` |
| --eval | train | Whether to perform evaluation in training | False | set `--eval` if needed |
| -r/--resume_checkpoint | train | Checkpoint path for resuming training | None | such as `-r output/faster_rcnn_r50_1x_coco/10000` |
| --slim_config | ALL | Configure file of slim method | None | such as `--slim_config configs/slim/prune/yolov3_prune_l1_norm.yml` |
| --use_vdl | train/infer | Whether to record the data with [VisualDL](https://github.com/paddlepaddle/visualdl), so as to display in VisualDL | False | VisualDL requires Python>=3.5 |
| --vdl\_log_dir | train/infer | VisualDL logging directory for image | train:`vdl_log_dir/scalar` infer: `vdl_log_dir/image` | VisualDL requires Python>=3.5 |
| --output_eval | eval | Directory for storing the evaluation output | None | such as `--output_eval=eval_output`, default is current directory |
| --json_eval | eval | Whether to evaluate with already existed bbox.json or mask.json | False | set `--json_eval` if needed and json path is set in `--output_eval` |
| --classwise | eval | Whether to eval AP for each class and draw PR curve | False | set `--classwise` if needed |
| --output_dir | infer | Directory for storing the output visualization files | `./output` | such as `--output_dir output` |
| --draw_threshold | infer | Threshold to reserve the result for visualization | 0.5 | such as `--draw_threshold 0.7` |
| --infer_dir | infer | Directory for images to perform inference on | None | One of `infer_dir` and `infer_img` is required |
| --infer_img | infer | Image path | None | One of `infer_dir` and `infer_img` is required, `infer_img` has higher priority over `infer_dir` |
| --save_results | infer | Whether to save detection results to file | False | Optional |
## Examples
### Training
- Perform evaluation in training
```bash
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml --eval
```
Training and evaluation are performed alternately, with evaluation at the end of each epoch. Meanwhile, the best model with the highest mAP is saved at each epoch, under the same path as `model_final`.
If the evaluation dataset is large, we suggest modifying `snapshot_epoch` in `configs/runtime.yml` to reduce the number of evaluations, or evaluating after training.
- Fine-tune other task
When using pre-trained model to fine-tune other task, pretrain\_weights can be used directly. The parameters with different shape will be ignored automatically. For example:
```bash
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
# If the shape of parameters in program is different from pretrain_weights,
# then PaddleDetection will not use such parameters.
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml \
                        -o pretrain_weights=output/faster_rcnn_r50_1x_coco/model_final
```
##### NOTES
- `CUDA_VISIBLE_DEVICES` can specify different gpu numbers. Such as: `export CUDA_VISIBLE_DEVICES=0,1,2,3`.
- Dataset will be downloaded automatically and cached in `~/.cache/paddle/dataset` if not be found locally.
- Pretrained model is downloaded automatically and cached in `~/.cache/paddle/weights`.
- Checkpoints are saved in `output` by default, and can be revised from `save_dir` in `configs/runtime.yml`.
### Evaluation
- Evaluate by specified weights path and dataset path
```bash
export CUDA_VISIBLE_DEVICES=0
python -u tools/eval.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml \
-o weights=https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_fpn_1x_coco.pdparams
```
The path of the model to be evaluated can be either a local path or a link in [MODEL_ZOO](../MODEL_ZOO_cn.md).
- Evaluate with json
```bash
export CUDA_VISIBLE_DEVICES=0
python tools/eval.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml \
--json_eval \
            --output_eval evaluation/
```
The json file must be named bbox.json or mask.json, placed in the `evaluation/` directory.
### Inference
- Output specified directory && Set up threshold
```bash
export CUDA_VISIBLE_DEVICES=0
python tools/infer.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml \
--infer_img=demo/000000570688.jpg \
--output_dir=infer_output/ \
--draw_threshold=0.5 \
-o weights=output/faster_rcnn_r50_fpn_1x_coco/model_final \
--use_vdl=True
```
`--draw_threshold` is an optional argument. Default is 0.5.
Different thresholds will produce different results depending on the calculation of [NMS](https://ieeexplore.ieee.org/document/1699659).
## Deployment
Please refer to [depolyment](../../deploy/README_en.md)
## Model Compression
Please refer to [slim](../../configs/slim/README_en.md)
| PaddleDetection/docs/tutorials/GETTING_STARTED.md/0 | {
"file_path": "PaddleDetection/docs/tutorials/GETTING_STARTED.md",
"repo_id": "PaddleDetection",
"token_count": 3061
} | 61 |
Simplified Chinese | [English](DetAnnoTools_en.md)
# Object Detection Annotation Tools
## Table of Contents
[LabelMe](#labelme)
* [Usage](#usage)
* [Installation](#labelme-installation)
* [Image Annotation Process](#labelme-image-annotation-process)
* [Annotation Format](#labelme-annotation-format)
* [Export Data Format](#labelme-export-data-format)
* [Format Conversion Summary](#format-conversion-summary)
* [Annotation file (json) --> VOC](#annotation-file-json----voc-dataset)
* [Annotation file (json) --> COCO](#annotation-file-json----coco-dataset)
[LabelImg](#labelimg)
* [Usage](#usage-1)
* [LabelImg Installation](#labelimg-installation)
* [Installation Notes](#installation-notes)
* [Image Annotation Process](#labelimg-image-annotation-process)
* [Annotation Format](#labelimg-annotation-format)
* [Export Data Format](#labelimg-export-data-format)
* [Format Conversion Notes](#format-conversion-notes)
## [LabelMe](https://github.com/wkentaro/labelme)
### Usage
#### LabelMe Installation
For detailed installation steps, please refer to the Installation section of the [official LabelMe tutorial](https://github.com/wkentaro/labelme).
<details>
<summary><b> Ubuntu</b></summary>
```
sudo apt-get install labelme
# or
sudo pip3 install labelme
# or install standalone executable from:
# https://github.com/wkentaro/labelme/releases
```
</details>
<details>
<summary><b> macOS</b></summary>
```
brew install pyqt # maybe pyqt5
pip install labelme
# or
brew install wkentaro/labelme/labelme # command line interface
# brew install --cask wkentaro/labelme/labelme # app
# or install standalone executable/app from:
# https://github.com/wkentaro/labelme/releases
```
</details>
The Anaconda installation method is recommended:
```
conda create --name=labelme python=3
conda activate labelme
pip install pyqt5
pip install labelme
```
#### LabelMe Image Annotation Process
After starting labelme, select an image file or the folder containing the images.
In the left toolbar, choose `create polygons` and draw the annotation region as shown in the figure below (right-click on the image area to choose a different annotation shape). After drawing the region, press Enter and fill in the label of the annotated region, e.g. people, in the pop-up box.
Click Save in the left menu bar to generate the **annotation file** in `json` format.

### LabelMe Annotation Format
#### LabelMe Export Data Format
```
# generate annotation files
png/jpeg/jpg --> labelme annotation --> json
```
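For reference, a LabelMe `json` file stores the annotated shapes in a `shapes` list, each entry holding a `label` and its `points`. Below is a minimal sketch (not part of LabelMe; the helper name is made up for illustration) for turning such shapes into bounding boxes:

```python
import json

def read_labelme_boxes(json_path):
    """Return a list of (label, xmin, ymin, xmax, ymax) tuples from a LabelMe json file."""
    with open(json_path, "r") as f:
        anno = json.load(f)
    boxes = []
    for shape in anno["shapes"]:
        xs = [p[0] for p in shape["points"]]
        ys = [p[1] for p in shape["points"]]
        boxes.append((shape["label"], min(xs), min(ys), max(xs), max(ys)))
    return boxes
```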
#### Format Conversion Summary
```
# convert annotation files to the VOC dataset format
json --> labelme2voc.py --> VOC dataset
# convert annotation files to the COCO dataset format
json --> labelme2coco.py --> COCO dataset
```
#### Annotation file (json) --> VOC dataset
Use the [labelme2voc.py script provided officially](https://github.com/wkentaro/labelme/blob/main/examples/bbox_detection/labelme2voc.py).
Download the script and run it from the command line:
```
python labelme2voc.py data_annotated(folder containing the annotation files) data_dataset_voc(output folder) --labels labels.txt
```
After running it, the specified output folder will contain the following directories:
```
# It generates:
# - data_dataset_voc/JPEGImages
# - data_dataset_voc/Annotations
# - data_dataset_voc/AnnotationsVisualization
```
#### Annotation file (json) --> COCO dataset
Use the [x2coco.py provided by PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/tools/x2coco.py) to convert the labelme-annotated data into the COCO dataset format:
```bash
python tools/x2coco.py \
--dataset_type labelme \
--json_input_dir ./labelme_annos/ \
--image_input_dir ./labelme_imgs/ \
--output_dir ./cocome/ \
--train_proportion 0.8 \
--val_proportion 0.2 \
--test_proportion 0.0
```
After the user dataset is converted to COCO format, the directory structure is as follows (note: avoid Chinese characters in path names and file names in the dataset to prevent errors caused by Chinese encoding issues):
```
dataset/xxx/
├── annotations
│   ├── train.json # COCO annotation file
│   ├── valid.json # COCO annotation file
├── images
│ ├── xxx1.jpg
│ ├── xxx2.jpg
│ ├── xxx3.jpg
│ | ...
...
```
## [LabelImg](https://github.com/tzutalin/labelImg)
### Usage
#### LabelImg Installation
For installation, please refer to the [official LabelImg tutorial](https://github.com/tzutalin/labelImg).
<details>
<summary><b> Ubuntu</b></summary>
```
sudo apt-get install pyqt5-dev-tools
sudo pip3 install -r requirements/requirements-linux-python3.txt
make qt5py3
python3 labelImg.py
python3 labelImg.py [IMAGE_PATH] [PRE-DEFINED CLASS FILE]
```
</details>
<details>
<summary><b>macOS</b></summary>
```
brew install qt # Install qt-5.x.x by Homebrew
brew install libxml2
or using pip
pip3 install pyqt5 lxml # Install qt and lxml by pip
make qt5py3
python3 labelImg.py
python3 labelImg.py [IMAGE_PATH] [PRE-DEFINED CLASS FILE]
```
</details>
The Anaconda installation method is recommended.
First download [labelImg](https://github.com/tzutalin/labelImg#labelimg) and enter its directory:
```
conda install pyqt=5
conda install -c anaconda lxml
pyrcc5 -o libs/resources.py resources.qrc
python labelImg.py
python labelImg.py [IMAGE_PATH] [PRE-DEFINED CLASS FILE]
```
#### Installation Notes
Taking the Anaconda installation as an example, the setup is somewhat more involved than for LabelMe.
It is launched by running the script with python: `python labelImg.py <image path>`.
#### LabelImg Image Annotation Process
After starting labelImg, select an image file or the folder containing the images.
In the left toolbar, choose `Create RectBox` to draw an annotation box, then select the corresponding label in the pop-up box.
Click Save in the left menu bar; you can choose among three annotation file types: VOC/YOLO/CreateML.

### LabelImg Annotation Format
#### LabelImg Export Data Format
```
# generate annotation files
png/jpeg/jpg --> labelImg annotation --> xml/txt/json
```
#### Format Conversion Notes
**PaddleDetection supports data in VOC or COCO format.** Annotation files exported from LabelImg need to be converted into **VOC or COCO format**; for instructions on how to adjust them, please refer to [Prepare Training Data](./PrepareDataSet.md#%E5%87%86%E5%A4%87%E8%AE%AD%E7%BB%83%E6%95%B0%E6%8D%AE).
| PaddleDetection/docs/tutorials/data/DetAnnoTools.md/0 | {
"file_path": "PaddleDetection/docs/tutorials/data/DetAnnoTools.md",
"repo_id": "PaddleDetection",
"token_count": 3367
} | 62 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import (core, data, engine, modeling, model_zoo, optimizer, metrics,
utils, slim)
try:
from .version import full_version as __version__
from .version import commit as __git_commit__
except ImportError:
import sys
sys.stderr.write("Warning: import ppdet from source directory " \
"without installing, run 'python setup.py install' to " \
"install ppdet firstly\n")
| PaddleDetection/ppdet/__init__.py/0 | {
"file_path": "PaddleDetection/ppdet/__init__.py",
"repo_id": "PaddleDetection",
"token_count": 318
} | 63 |
from ppdet.core.workspace import register, serializable
import cv2
import os
import tarfile
import numpy as np
import os.path as osp
from ppdet.data.source.dataset import DetDataset
from imgaug.augmentables.lines import LineStringsOnImage
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
from ppdet.data.culane_utils import lane_to_linestrings
import pickle as pkl
from ppdet.utils.logger import setup_logger
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
from .dataset import DetDataset, _make_dataset, _is_valid_file
from ppdet.utils.download import download_dataset
logger = setup_logger(__name__)
@register
@serializable
class CULaneDataSet(DetDataset):
def __init__(
self,
dataset_dir,
cut_height,
list_path,
split='train',
data_fields=['image'],
video_file=None,
frame_rate=-1, ):
super(CULaneDataSet, self).__init__(
dataset_dir=dataset_dir,
cut_height=cut_height,
split=split,
data_fields=data_fields)
self.dataset_dir = dataset_dir
self.list_path = osp.join(dataset_dir, list_path)
self.cut_height = cut_height
self.data_fields = data_fields
self.split = split
self.training = 'train' in split
self.data_infos = []
self.video_file = video_file
self.frame_rate = frame_rate
self._imid2path = {}
self.predict_dir = None
def __len__(self):
return len(self.data_infos)
def check_or_download_dataset(self):
if not osp.exists(self.dataset_dir):
download_dataset("dataset", dataset="culane")
# extract .tar files in self.dataset_dir
for fname in os.listdir(self.dataset_dir):
logger.info("Decompressing {}...".format(fname))
# ignore .* files
if fname.startswith('.'):
continue
if fname.find('.tar.gz') >= 0:
with tarfile.open(osp.join(self.dataset_dir, fname)) as tf:
tf.extractall(path=self.dataset_dir)
logger.info("Dataset files are ready.")
def parse_dataset(self):
logger.info('Loading CULane annotations...')
if self.predict_dir is not None:
logger.info('switch to predict mode')
return
# Waiting for the dataset to load is tedious, let's cache it
os.makedirs('cache', exist_ok=True)
cache_path = 'cache/culane_paddle_{}.pkl'.format(self.split)
if os.path.exists(cache_path):
with open(cache_path, 'rb') as cache_file:
self.data_infos = pkl.load(cache_file)
self.max_lanes = max(
len(anno['lanes']) for anno in self.data_infos)
return
with open(self.list_path) as list_file:
for line in list_file:
infos = self.load_annotation(line.split())
self.data_infos.append(infos)
# cache data infos to file
with open(cache_path, 'wb') as cache_file:
pkl.dump(self.data_infos, cache_file)
def load_annotation(self, line):
infos = {}
img_line = line[0]
img_line = img_line[1 if img_line[0] == '/' else 0::]
img_path = os.path.join(self.dataset_dir, img_line)
infos['img_name'] = img_line
infos['img_path'] = img_path
if len(line) > 1:
mask_line = line[1]
mask_line = mask_line[1 if mask_line[0] == '/' else 0::]
mask_path = os.path.join(self.dataset_dir, mask_line)
infos['mask_path'] = mask_path
if len(line) > 2:
exist_list = [int(l) for l in line[2:]]
infos['lane_exist'] = np.array(exist_list)
        anno_path = img_path[:-3] + 'lines.txt'  # strip the 'jpg' suffix and add 'lines.txt'
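        # each line of the *.lines.txt file stores one lane as "x1 y1 x2 y2 ..." coordinate pairs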
with open(anno_path, 'r') as anno_file:
data = [
list(map(float, line.split())) for line in anno_file.readlines()
]
lanes = [[(lane[i], lane[i + 1]) for i in range(0, len(lane), 2)
if lane[i] >= 0 and lane[i + 1] >= 0] for lane in data]
lanes = [list(set(lane)) for lane in lanes] # remove duplicated points
        lanes = [lane for lane in lanes
                 if len(lane) > 2]  # remove lanes with fewer than 3 points
lanes = [sorted(
lane, key=lambda x: x[1]) for lane in lanes] # sort by y
infos['lanes'] = lanes
return infos
def set_images(self, images):
self.predict_dir = images
self.data_infos = self._load_images()
def _find_images(self):
predict_dir = self.predict_dir
if not isinstance(predict_dir, Sequence):
predict_dir = [predict_dir]
images = []
for im_dir in predict_dir:
if os.path.isdir(im_dir):
im_dir = os.path.join(self.predict_dir, im_dir)
images.extend(_make_dataset(im_dir))
elif os.path.isfile(im_dir) and _is_valid_file(im_dir):
images.append(im_dir)
return images
def _load_images(self):
images = self._find_images()
ct = 0
records = []
for image in images:
assert image != '' and os.path.isfile(image), \
"Image {} not found".format(image)
if self.sample_num > 0 and ct >= self.sample_num:
break
rec = {
'im_id': np.array([ct]),
"img_path": os.path.abspath(image),
"img_name": os.path.basename(image),
"lanes": []
}
self._imid2path[ct] = image
ct += 1
records.append(rec)
assert len(records) > 0, "No image file found"
return records
def get_imid2path(self):
return self._imid2path
def __getitem__(self, idx):
data_info = self.data_infos[idx]
img = cv2.imread(data_info['img_path'])
img = img[self.cut_height:, :, :]
sample = data_info.copy()
sample.update({'image': img})
img_org = sample['image']
if self.training:
label = cv2.imread(sample['mask_path'], cv2.IMREAD_UNCHANGED)
if len(label.shape) > 2:
label = label[:, :, 0]
label = label.squeeze()
label = label[self.cut_height:, :]
sample.update({'mask': label})
if self.cut_height != 0:
new_lanes = []
for i in sample['lanes']:
lanes = []
for p in i:
lanes.append((p[0], p[1] - self.cut_height))
new_lanes.append(lanes)
sample.update({'lanes': new_lanes})
sample['mask'] = SegmentationMapsOnImage(
sample['mask'], shape=img_org.shape)
sample['full_img_path'] = data_info['img_path']
sample['img_name'] = data_info['img_name']
sample['im_id'] = np.array([idx])
sample['image'] = sample['image'].copy().astype(np.uint8)
sample['lanes'] = lane_to_linestrings(sample['lanes'])
sample['lanes'] = LineStringsOnImage(
sample['lanes'], shape=img_org.shape)
sample['seg'] = np.zeros(img_org.shape)
return sample
| PaddleDetection/ppdet/data/source/culane.py/0 | {
"file_path": "PaddleDetection/ppdet/data/source/culane.py",
"repo_id": "PaddleDetection",
"token_count": 3779
} | 64 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
from numbers import Integral
import cv2
import copy
import numpy as np
import random
import math
from .operators import BaseOperator, register_op
from .batch_operators import Gt2TTFTarget
from ppdet.modeling.bbox_utils import bbox_iou_np_expand
from ppdet.utils.logger import setup_logger
from .op_helper import gaussian_radius
logger = setup_logger(__name__)
__all__ = [
'RGBReverse', 'LetterBoxResize', 'MOTRandomAffine', 'Gt2JDETargetThres',
'Gt2JDETargetMax', 'Gt2FairMOTTarget'
]
@register_op
class RGBReverse(BaseOperator):
"""RGB to BGR, or BGR to RGB, sensitive to MOTRandomAffine
"""
def __init__(self):
super(RGBReverse, self).__init__()
def apply(self, sample, context=None):
im = sample['image']
sample['image'] = np.ascontiguousarray(im[:, :, ::-1])
return sample
@register_op
class LetterBoxResize(BaseOperator):
def __init__(self, target_size):
"""
Resize image to target size, convert normalized xywh to pixel xyxy
format ([x_center, y_center, width, height] -> [x0, y0, x1, y1]).
Args:
target_size (int|list): image target size.
"""
super(LetterBoxResize, self).__init__()
if not isinstance(target_size, (Integral, Sequence)):
raise TypeError(
"Type of target_size is invalid. Must be Integer or List or Tuple, now is {}".
format(type(target_size)))
if isinstance(target_size, Integral):
target_size = [target_size, target_size]
self.target_size = target_size
def apply_image(self, img, height, width, color=(127.5, 127.5, 127.5)):
# letterbox: resize a rectangular image to a padded rectangular
shape = img.shape[:2] # [height, width]
ratio_h = float(height) / shape[0]
ratio_w = float(width) / shape[1]
ratio = min(ratio_h, ratio_w)
new_shape = (round(shape[1] * ratio),
round(shape[0] * ratio)) # [width, height]
padw = (width - new_shape[0]) / 2
padh = (height - new_shape[1]) / 2
top, bottom = round(padh - 0.1), round(padh + 0.1)
left, right = round(padw - 0.1), round(padw + 0.1)
img = cv2.resize(
img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color) # padded rectangular
return img, ratio, padw, padh
def apply_bbox(self, bbox0, h, w, ratio, padw, padh):
bboxes = bbox0.copy()
bboxes[:, 0] = ratio * w * (bbox0[:, 0] - bbox0[:, 2] / 2) + padw
bboxes[:, 1] = ratio * h * (bbox0[:, 1] - bbox0[:, 3] / 2) + padh
bboxes[:, 2] = ratio * w * (bbox0[:, 0] + bbox0[:, 2] / 2) + padw
bboxes[:, 3] = ratio * h * (bbox0[:, 1] + bbox0[:, 3] / 2) + padh
return bboxes
def apply(self, sample, context=None):
""" Resize the image numpy.
"""
im = sample['image']
h, w = sample['im_shape']
if not isinstance(im, np.ndarray):
raise TypeError("{}: image type is not numpy.".format(self))
if len(im.shape) != 3:
from PIL import UnidentifiedImageError
raise UnidentifiedImageError(
'{}: image is not 3-dimensional.'.format(self))
# apply image
height, width = self.target_size
img, ratio, padw, padh = self.apply_image(
im, height=height, width=width)
sample['image'] = img
new_shape = (round(h * ratio), round(w * ratio))
sample['im_shape'] = np.asarray(new_shape, dtype=np.float32)
sample['scale_factor'] = np.asarray([ratio, ratio], dtype=np.float32)
# apply bbox
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], h, w, ratio,
padw, padh)
return sample
@register_op
class MOTRandomAffine(BaseOperator):
"""
Affine transform to image and coords to achieve the rotate, scale and
shift effect for training image.
Args:
degrees (list[2]): the rotate range to apply, transform range is [min, max]
translate (list[2]): the translate range to apply, transform range is [min, max]
scale (list[2]): the scale range to apply, transform range is [min, max]
shear (list[2]): the shear range to apply, transform range is [min, max]
        borderValue (list[3]): value used in case of a constant border when applying
            the perspective transformation
        reject_outside (bool): reject warped bounding boxes outside of the image
    Returns:
        records(dict): contain the image and coords after being transformed
"""
def __init__(self,
degrees=(-5, 5),
translate=(0.10, 0.10),
scale=(0.50, 1.20),
shear=(-2, 2),
borderValue=(127.5, 127.5, 127.5),
reject_outside=True):
super(MOTRandomAffine, self).__init__()
self.degrees = degrees
self.translate = translate
self.scale = scale
self.shear = shear
self.borderValue = borderValue
self.reject_outside = reject_outside
def apply(self, sample, context=None):
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
border = 0 # width of added border (optional)
img = sample['image']
height, width = img.shape[0], img.shape[1]
# Rotation and Scale
R = np.eye(3)
a = random.random() * (self.degrees[1] - self.degrees[0]
) + self.degrees[0]
s = random.random() * (self.scale[1] - self.scale[0]) + self.scale[0]
R[:2] = cv2.getRotationMatrix2D(
angle=a, center=(width / 2, height / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = (
random.random() * 2 - 1
) * self.translate[0] * height + border # x translation (pixels)
T[1, 2] = (
random.random() * 2 - 1
) * self.translate[1] * width + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan((random.random() *
(self.shear[1] - self.shear[0]) + self.shear[0]) *
math.pi / 180) # x shear (deg)
S[1, 0] = math.tan((random.random() *
(self.shear[1] - self.shear[0]) + self.shear[0]) *
math.pi / 180) # y shear (deg)
        M = S @ T @ R  # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
imw = cv2.warpPerspective(
img,
M,
dsize=(width, height),
flags=cv2.INTER_LINEAR,
borderValue=self.borderValue) # BGR order borderValue
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
targets = sample['gt_bbox']
n = targets.shape[0]
points = targets.copy()
area0 = (points[:, 2] - points[:, 0]) * (
points[:, 3] - points[:, 1])
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
n * 4, 2) # x1y1, x2y2, x1y2, x2y1
            xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate(
(x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# apply angle-based reduction
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians)))**0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate(
(x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
if self.reject_outside:
np.clip(xy[:, 0], 0, width, out=xy[:, 0])
np.clip(xy[:, 2], 0, width, out=xy[:, 2])
np.clip(xy[:, 1], 0, height, out=xy[:, 1])
np.clip(xy[:, 3], 0, height, out=xy[:, 3])
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
if sum(i) > 0:
sample['gt_bbox'] = xy[i].astype(sample['gt_bbox'].dtype)
sample['gt_class'] = sample['gt_class'][i]
if 'difficult' in sample:
sample['difficult'] = sample['difficult'][i]
if 'gt_ide' in sample:
sample['gt_ide'] = sample['gt_ide'][i]
if 'is_crowd' in sample:
sample['is_crowd'] = sample['is_crowd'][i]
sample['image'] = imw
return sample
else:
return sample
@register_op
class Gt2JDETargetThres(BaseOperator):
__shared__ = ['num_classes']
"""
Generate JDE targets by groud truth data when training
Args:
anchors (list): anchors of JDE model
anchor_masks (list): anchor_masks of JDE model
downsample_ratios (list): downsample ratios of JDE model
        ide_thresh (float): thresh of identity, higher is ground truth
fg_thresh (float): thresh of foreground, higher is foreground
bg_thresh (float): thresh of background, lower is background
num_classes (int): number of classes
"""
def __init__(self,
anchors,
anchor_masks,
downsample_ratios,
ide_thresh=0.5,
fg_thresh=0.5,
bg_thresh=0.4,
num_classes=1):
super(Gt2JDETargetThres, self).__init__()
self.anchors = anchors
self.anchor_masks = anchor_masks
self.downsample_ratios = downsample_ratios
self.ide_thresh = ide_thresh
self.fg_thresh = fg_thresh
self.bg_thresh = bg_thresh
self.num_classes = num_classes
def generate_anchor(self, nGh, nGw, anchor_hw):
nA = len(anchor_hw)
yy, xx = np.meshgrid(np.arange(nGh), np.arange(nGw))
mesh = np.stack([xx.T, yy.T], axis=0) # [2, nGh, nGw]
mesh = np.repeat(mesh[None, :], nA, axis=0) # [nA, 2, nGh, nGw]
anchor_offset_mesh = anchor_hw[:, :, None][:, :, :, None]
anchor_offset_mesh = np.repeat(anchor_offset_mesh, nGh, axis=-2)
anchor_offset_mesh = np.repeat(anchor_offset_mesh, nGw, axis=-1)
anchor_mesh = np.concatenate(
[mesh, anchor_offset_mesh], axis=1) # [nA, 4, nGh, nGw]
return anchor_mesh
def encode_delta(self, gt_box_list, fg_anchor_list):
px, py, pw, ph = fg_anchor_list[:, 0], fg_anchor_list[:,1], \
fg_anchor_list[:, 2], fg_anchor_list[:,3]
gx, gy, gw, gh = gt_box_list[:, 0], gt_box_list[:, 1], \
gt_box_list[:, 2], gt_box_list[:, 3]
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = np.log(gw / pw)
dh = np.log(gh / ph)
return np.stack([dx, dy, dw, dh], axis=1)
def pad_box(self, sample, num_max):
assert 'gt_bbox' in sample
bbox = sample['gt_bbox']
gt_num = len(bbox)
pad_bbox = np.zeros((num_max, 4), dtype=np.float32)
if gt_num > 0:
pad_bbox[:gt_num, :] = bbox[:gt_num, :]
sample['gt_bbox'] = pad_bbox
if 'gt_score' in sample:
pad_score = np.zeros((num_max, ), dtype=np.float32)
if gt_num > 0:
pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]
sample['gt_score'] = pad_score
if 'difficult' in sample:
pad_diff = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]
sample['difficult'] = pad_diff
if 'is_crowd' in sample:
pad_crowd = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_crowd[:gt_num] = sample['is_crowd'][:gt_num, 0]
sample['is_crowd'] = pad_crowd
if 'gt_ide' in sample:
pad_ide = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_ide[:gt_num] = sample['gt_ide'][:gt_num, 0]
sample['gt_ide'] = pad_ide
return sample
def __call__(self, samples, context=None):
        assert len(self.anchor_masks) == len(self.downsample_ratios), \
            "'anchor_masks' and 'downsample_ratios' should have the same length."
h, w = samples[0]['image'].shape[1:3]
num_max = 0
for sample in samples:
num_max = max(num_max, len(sample['gt_bbox']))
for sample in samples:
gt_bbox = sample['gt_bbox']
gt_ide = sample['gt_ide']
for i, (anchor_hw, downsample_ratio
) in enumerate(zip(self.anchors, self.downsample_ratios)):
anchor_hw = np.array(
anchor_hw, dtype=np.float32) / downsample_ratio
nA = len(anchor_hw)
nGh, nGw = int(h / downsample_ratio), int(w / downsample_ratio)
tbox = np.zeros((nA, nGh, nGw, 4), dtype=np.float32)
tconf = np.zeros((nA, nGh, nGw), dtype=np.float32)
tid = -np.ones((nA, nGh, nGw, 1), dtype=np.float32)
gxy, gwh = gt_bbox[:, 0:2].copy(), gt_bbox[:, 2:4].copy()
gxy[:, 0] = gxy[:, 0] * nGw
gxy[:, 1] = gxy[:, 1] * nGh
gwh[:, 0] = gwh[:, 0] * nGw
gwh[:, 1] = gwh[:, 1] * nGh
gxy[:, 0] = np.clip(gxy[:, 0], 0, nGw - 1)
gxy[:, 1] = np.clip(gxy[:, 1], 0, nGh - 1)
tboxes = np.concatenate([gxy, gwh], axis=1)
anchor_mesh = self.generate_anchor(nGh, nGw, anchor_hw)
anchor_list = np.transpose(anchor_mesh,
(0, 2, 3, 1)).reshape(-1, 4)
iou_pdist = bbox_iou_np_expand(
anchor_list, tboxes, x1y1x2y2=False)
iou_max = np.max(iou_pdist, axis=1)
max_gt_index = np.argmax(iou_pdist, axis=1)
iou_map = iou_max.reshape(nA, nGh, nGw)
gt_index_map = max_gt_index.reshape(nA, nGh, nGw)
id_index = iou_map > self.ide_thresh
fg_index = iou_map > self.fg_thresh
bg_index = iou_map < self.bg_thresh
ign_index = (iou_map < self.fg_thresh) * (
iou_map > self.bg_thresh)
tconf[fg_index] = 1
tconf[bg_index] = 0
tconf[ign_index] = -1
gt_index = gt_index_map[fg_index]
gt_box_list = tboxes[gt_index]
gt_id_list = gt_ide[gt_index_map[id_index]]
if np.sum(fg_index) > 0:
tid[id_index] = gt_id_list
fg_anchor_list = anchor_list.reshape(nA, nGh, nGw,
4)[fg_index]
delta_target = self.encode_delta(gt_box_list,
fg_anchor_list)
tbox[fg_index] = delta_target
sample['tbox{}'.format(i)] = tbox
sample['tconf{}'.format(i)] = tconf
sample['tide{}'.format(i)] = tid
sample.pop('gt_class')
sample = self.pad_box(sample, num_max)
return samples
@register_op
class Gt2JDETargetMax(BaseOperator):
__shared__ = ['num_classes']
"""
    Generate JDE targets by ground truth data when evaluating
Args:
anchors (list): anchors of JDE model
anchor_masks (list): anchor_masks of JDE model
downsample_ratios (list): downsample ratios of JDE model
max_iou_thresh (float): iou thresh for high quality anchor
num_classes (int): number of classes
"""
def __init__(self,
anchors,
anchor_masks,
downsample_ratios,
max_iou_thresh=0.60,
num_classes=1):
super(Gt2JDETargetMax, self).__init__()
self.anchors = anchors
self.anchor_masks = anchor_masks
self.downsample_ratios = downsample_ratios
self.max_iou_thresh = max_iou_thresh
self.num_classes = num_classes
def __call__(self, samples, context=None):
        assert len(self.anchor_masks) == len(self.downsample_ratios), \
            "'anchor_masks' and 'downsample_ratios' should have the same length."
h, w = samples[0]['image'].shape[1:3]
for sample in samples:
gt_bbox = sample['gt_bbox']
gt_ide = sample['gt_ide']
for i, (anchor_hw, downsample_ratio
) in enumerate(zip(self.anchors, self.downsample_ratios)):
anchor_hw = np.array(
anchor_hw, dtype=np.float32) / downsample_ratio
nA = len(anchor_hw)
nGh, nGw = int(h / downsample_ratio), int(w / downsample_ratio)
tbox = np.zeros((nA, nGh, nGw, 4), dtype=np.float32)
tconf = np.zeros((nA, nGh, nGw), dtype=np.float32)
tid = -np.ones((nA, nGh, nGw, 1), dtype=np.float32)
gxy, gwh = gt_bbox[:, 0:2].copy(), gt_bbox[:, 2:4].copy()
gxy[:, 0] = gxy[:, 0] * nGw
gxy[:, 1] = gxy[:, 1] * nGh
gwh[:, 0] = gwh[:, 0] * nGw
gwh[:, 1] = gwh[:, 1] * nGh
gi = np.clip(gxy[:, 0], 0, nGw - 1).astype(int)
gj = np.clip(gxy[:, 1], 0, nGh - 1).astype(int)
# iou of targets-anchors (using wh only)
box1 = gwh
box2 = anchor_hw[:, None, :]
inter_area = np.minimum(box1, box2).prod(2)
iou = inter_area / (
box1.prod(1) + box2.prod(2) - inter_area + 1e-16)
# Select best iou_pred and anchor
iou_best = iou.max(0) # best anchor [0-2] for each target
a = np.argmax(iou, axis=0)
# Select best unique target-anchor combinations
iou_order = np.argsort(-iou_best) # best to worst
# Unique anchor selection
u = np.stack((gi, gj, a), 0)[:, iou_order]
_, first_unique = np.unique(u, axis=1, return_index=True)
mask = iou_order[first_unique]
# best anchor must share significant commonality (iou) with target
# TODO: examine arbitrary threshold
idx = mask[iou_best[mask] > self.max_iou_thresh]
if len(idx) > 0:
a_i, gj_i, gi_i = a[idx], gj[idx], gi[idx]
t_box = gt_bbox[idx]
t_id = gt_ide[idx]
if len(t_box.shape) == 1:
t_box = t_box.reshape(1, 4)
gxy, gwh = t_box[:, 0:2].copy(), t_box[:, 2:4].copy()
gxy[:, 0] = gxy[:, 0] * nGw
gxy[:, 1] = gxy[:, 1] * nGh
gwh[:, 0] = gwh[:, 0] * nGw
gwh[:, 1] = gwh[:, 1] * nGh
# XY coordinates
tbox[:, :, :, 0:2][a_i, gj_i, gi_i] = gxy - gxy.astype(int)
# Width and height in yolo method
tbox[:, :, :, 2:4][a_i, gj_i, gi_i] = np.log(gwh /
anchor_hw[a_i])
tconf[a_i, gj_i, gi_i] = 1
tid[a_i, gj_i, gi_i] = t_id
sample['tbox{}'.format(i)] = tbox
sample['tconf{}'.format(i)] = tconf
sample['tide{}'.format(i)] = tid
@register_op
class Gt2FairMOTTarget(Gt2TTFTarget):
__shared__ = ['num_classes']
"""
Generate FairMOT targets by ground truth data.
    Differences between Gt2FairMOTTarget and Gt2TTFTarget are:
        1. the Gaussian kernel radius used to generate a heatmap.
2. the targets needed during training.
Args:
num_classes(int): the number of classes.
down_ratio(int): the down ratio from images to heatmap, 4 by default.
        max_objs(int): the maximum number of ground truth objects in an image, 500 by default.
"""
def __init__(self, num_classes=1, down_ratio=4, max_objs=500):
super(Gt2TTFTarget, self).__init__()
self.down_ratio = down_ratio
self.num_classes = num_classes
self.max_objs = max_objs
def __call__(self, samples, context=None):
for b_id, sample in enumerate(samples):
output_h = sample['image'].shape[1] // self.down_ratio
output_w = sample['image'].shape[2] // self.down_ratio
heatmap = np.zeros(
(self.num_classes, output_h, output_w), dtype='float32')
bbox_size = np.zeros((self.max_objs, 4), dtype=np.float32)
center_offset = np.zeros((self.max_objs, 2), dtype=np.float32)
index = np.zeros((self.max_objs, ), dtype=np.int64)
index_mask = np.zeros((self.max_objs, ), dtype=np.int32)
reid = np.zeros((self.max_objs, ), dtype=np.int64)
bbox_xys = np.zeros((self.max_objs, 4), dtype=np.float32)
if self.num_classes > 1:
# each category corresponds to a set of track ids
cls_tr_ids = np.zeros(
(self.num_classes, output_h, output_w), dtype=np.int64)
cls_id_map = np.full((output_h, output_w), -1, dtype=np.int64)
gt_bbox = sample['gt_bbox']
gt_class = sample['gt_class']
gt_ide = sample['gt_ide']
for k in range(len(gt_bbox)):
cls_id = gt_class[k][0]
bbox = gt_bbox[k]
ide = gt_ide[k][0]
bbox[[0, 2]] = bbox[[0, 2]] * output_w
bbox[[1, 3]] = bbox[[1, 3]] * output_h
bbox_amodal = copy.deepcopy(bbox)
bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.
bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.
bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]
bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]
bbox[0] = np.clip(bbox[0], 0, output_w - 1)
bbox[1] = np.clip(bbox[1], 0, output_h - 1)
h = bbox[3]
w = bbox[2]
bbox_xy = copy.deepcopy(bbox)
bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2
bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2
bbox_xy[2] = bbox_xy[0] + bbox_xy[2]
bbox_xy[3] = bbox_xy[1] + bbox_xy[3]
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)), 0.7)
radius = max(0, int(radius))
ct = np.array([bbox[0], bbox[1]], dtype=np.float32)
ct_int = ct.astype(np.int32)
self.draw_truncate_gaussian(heatmap[cls_id], ct_int, radius,
radius)
bbox_size[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]
index[k] = ct_int[1] * output_w + ct_int[0]
center_offset[k] = ct - ct_int
index_mask[k] = 1
reid[k] = ide
bbox_xys[k] = bbox_xy
if self.num_classes > 1:
cls_id_map[ct_int[1], ct_int[0]] = cls_id
cls_tr_ids[cls_id][ct_int[1]][ct_int[0]] = ide - 1
# track id start from 0
sample['heatmap'] = heatmap
sample['index'] = index
sample['offset'] = center_offset
sample['size'] = bbox_size
sample['index_mask'] = index_mask
sample['reid'] = reid
if self.num_classes > 1:
sample['cls_id_map'] = cls_id_map
sample['cls_tr_ids'] = cls_tr_ids
sample['bbox_xys'] = bbox_xys
sample.pop('is_crowd', None)
sample.pop('difficult', None)
sample.pop('gt_class', None)
sample.pop('gt_bbox', None)
sample.pop('gt_score', None)
sample.pop('gt_ide', None)
return samples
| PaddleDetection/ppdet/data/transform/mot_operators.py/0 | {
"file_path": "PaddleDetection/ppdet/data/transform/mot_operators.py",
"repo_id": "PaddleDetection",
"token_count": 14062
} | 65 |
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../rbox_iou/rbox_iou_utils.h"
#include "paddle/extension.h"
template <typename T>
void nms_rotated_cpu_kernel(const T *boxes_data, const float threshold,
const int64_t num_boxes, int64_t *num_keep_boxes,
int64_t *output_data) {
int num_masks = CeilDiv(num_boxes, 64);
std::vector<int64_t> masks(num_masks, 0);
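  // bit (i % 64) of masks[i / 64] marks whether sorted box i has already been suppressed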
for (int64_t i = 0; i < num_boxes; ++i) {
if (masks[i / 64] & 1ULL << (i % 64))
continue;
T box_1[5];
for (int k = 0; k < 5; ++k) {
box_1[k] = boxes_data[i * 5 + k];
}
for (int64_t j = i + 1; j < num_boxes; ++j) {
if (masks[j / 64] & 1ULL << (j % 64))
continue;
T box_2[5];
for (int k = 0; k < 5; ++k) {
box_2[k] = boxes_data[j * 5 + k];
}
if (rbox_iou_single<T>(box_1, box_2) > threshold) {
masks[j / 64] |= 1ULL << (j % 64);
}
}
}
int64_t output_data_idx = 0;
for (int64_t i = 0; i < num_boxes; ++i) {
if (masks[i / 64] & 1ULL << (i % 64))
continue;
output_data[output_data_idx++] = i;
}
*num_keep_boxes = output_data_idx;
for (; output_data_idx < num_boxes; ++output_data_idx) {
output_data[output_data_idx] = 0;
}
}
#define CHECK_INPUT_CPU(x) \
PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
std::vector<paddle::Tensor> NMSRotatedCPUForward(const paddle::Tensor &boxes,
const paddle::Tensor &scores,
float threshold) {
CHECK_INPUT_CPU(boxes);
CHECK_INPUT_CPU(scores);
auto num_boxes = boxes.shape()[0];
auto order_t =
std::get<1>(paddle::argsort(scores, /* axis=*/0, /* descending=*/true));
auto boxes_sorted = paddle::gather(boxes, order_t, /* axis=*/0);
auto keep =
paddle::empty({num_boxes}, paddle::DataType::INT64, paddle::CPUPlace());
int64_t num_keep_boxes = 0;
PD_DISPATCH_FLOATING_TYPES(boxes.type(), "nms_rotated_cpu_kernel", ([&] {
nms_rotated_cpu_kernel<data_t>(
boxes_sorted.data<data_t>(), threshold,
num_boxes, &num_keep_boxes,
keep.data<int64_t>());
}));
keep = keep.slice(0, num_keep_boxes);
return {paddle::gather(order_t, keep, /* axis=*/0)};
}
#ifdef PADDLE_WITH_CUDA
std::vector<paddle::Tensor> NMSRotatedCUDAForward(const paddle::Tensor &boxes,
const paddle::Tensor &scores,
float threshold);
#endif
std::vector<paddle::Tensor> NMSRotatedForward(const paddle::Tensor &boxes,
const paddle::Tensor &scores,
float threshold) {
if (boxes.is_cpu()) {
return NMSRotatedCPUForward(boxes, scores, threshold);
#ifdef PADDLE_WITH_CUDA
} else if (boxes.is_gpu()) {
return NMSRotatedCUDAForward(boxes, scores, threshold);
#endif
}
}
std::vector<std::vector<int64_t>>
NMSRotatedInferShape(std::vector<int64_t> boxes_shape,
std::vector<int64_t> scores_shape) {
return {{-1}};
}
std::vector<paddle::DataType> NMSRotatedInferDtype(paddle::DataType t1,
paddle::DataType t2) {
return {paddle::DataType::INT64};
}
PD_BUILD_OP(nms_rotated)
.Inputs({"Boxes", "Scores"})
.Outputs({"Output"})
.Attrs({"threshold: float"})
.SetKernelFn(PD_KERNEL(NMSRotatedForward))
.SetInferShapeFn(PD_INFER_SHAPE(NMSRotatedInferShape))
.SetInferDtypeFn(PD_INFER_DTYPE(NMSRotatedInferDtype)); | PaddleDetection/ppdet/ext_op/csrc/nms_rotated/nms_rotated.cc/0 | {
"file_path": "PaddleDetection/ppdet/ext_op/csrc/nms_rotated/nms_rotated.cc",
"repo_id": "PaddleDetection",
"token_count": 2171
} | 66 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import sys
import math
from collections import defaultdict
import numpy as np
from ppdet.modeling.bbox_utils import bbox_iou_np_expand
from .map_utils import ap_per_class
from .metrics import Metric
from .munkres import Munkres
try:
import motmetrics as mm
mm.lap.default_solver = 'lap'
except:
print(
'Warning: Unable to use MOT metric, please install motmetrics, for example: `pip install motmetrics`, see https://github.com/longcw/py-motmetrics'
)
pass
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['MOTEvaluator', 'MOTMetric', 'JDEDetMetric', 'KITTIMOTMetric']
def read_mot_results(filename, is_gt=False, is_ignore=False):
valid_label = [1]
ignore_labels = [2, 7, 8, 12] # only in motchallenge datasets like 'MOT16'
if is_gt:
logger.info(
"In MOT16/17 dataset the valid_label of ground truth is '{}', "
"in other dataset it should be '0' for single classs MOT.".format(
valid_label[0]))
results_dict = dict()
if os.path.isfile(filename):
with open(filename, 'r') as f:
for line in f.readlines():
linelist = line.split(',')
if len(linelist) < 7:
continue
fid = int(linelist[0])
if fid < 1:
continue
results_dict.setdefault(fid, list())
if is_gt:
label = int(float(linelist[7]))
mark = int(float(linelist[6]))
if mark == 0 or label not in valid_label:
continue
score = 1
elif is_ignore:
if 'MOT16-' in filename or 'MOT17-' in filename or 'MOT15-' in filename or 'MOT20-' in filename:
label = int(float(linelist[7]))
vis_ratio = float(linelist[8])
if label not in ignore_labels and vis_ratio >= 0:
continue
else:
continue
score = 1
else:
score = float(linelist[6])
tlwh = tuple(map(float, linelist[2:6]))
target_id = int(linelist[1])
results_dict[fid].append((tlwh, target_id, score))
return results_dict
"""
MOT dataset label list, see in https://motchallenge.net
labels={'ped', ... % 1
'person_on_vhcl', ... % 2
'car', ... % 3
'bicycle', ... % 4
'mbike', ... % 5
'non_mot_vhcl', ... % 6
'static_person', ... % 7
'distractor', ... % 8
'occluder', ... % 9
'occluder_on_grnd', ... % 10
'occluder_full', ... % 11
'reflection', ... % 12
'crowd' ... % 13
};
"""
def unzip_objs(objs):
if len(objs) > 0:
tlwhs, ids, scores = zip(*objs)
else:
tlwhs, ids, scores = [], [], []
tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
return tlwhs, ids, scores
class MOTEvaluator(object):
def __init__(self, data_root, seq_name, data_type):
self.data_root = data_root
self.seq_name = seq_name
self.data_type = data_type
self.load_annotations()
try:
import motmetrics as mm
mm.lap.default_solver = 'lap'
except Exception as e:
raise RuntimeError(
'Unable to use MOT metric, please install motmetrics, for example: `pip install motmetrics`, see https://github.com/longcw/py-motmetrics'
)
self.reset_accumulator()
def load_annotations(self):
assert self.data_type == 'mot'
gt_filename = os.path.join(self.data_root, self.seq_name, 'gt',
'gt.txt')
if not os.path.exists(gt_filename):
            logger.warning(
                "gt_filename '{}' of MOTEvaluator does not exist, so the MOTA will be -INF.".
                format(gt_filename))
self.gt_frame_dict = read_mot_results(gt_filename, is_gt=True)
self.gt_ignore_frame_dict = read_mot_results(
gt_filename, is_ignore=True)
def reset_accumulator(self):
self.acc = mm.MOTAccumulator(auto_id=True)
def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
# results
trk_tlwhs = np.copy(trk_tlwhs)
trk_ids = np.copy(trk_ids)
# gts
gt_objs = self.gt_frame_dict.get(frame_id, [])
gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
# ignore boxes
ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
ignore_tlwhs = unzip_objs(ignore_objs)[0]
# remove ignored results
keep = np.ones(len(trk_tlwhs), dtype=bool)
iou_distance = mm.distances.iou_matrix(
ignore_tlwhs, trk_tlwhs, max_iou=0.5)
if len(iou_distance) > 0:
match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
match_ious = iou_distance[match_is, match_js]
match_js = np.asarray(match_js, dtype=int)
match_js = match_js[np.logical_not(np.isnan(match_ious))]
keep[match_js] = False
trk_tlwhs = trk_tlwhs[keep]
trk_ids = trk_ids[keep]
# get distance matrix
iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
# acc
self.acc.update(gt_ids, trk_ids, iou_distance)
if rtn_events and iou_distance.size > 0 and hasattr(self.acc,
'last_mot_events'):
events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics
else:
events = None
return events
def eval_file(self, filename):
self.reset_accumulator()
result_frame_dict = read_mot_results(filename, is_gt=False)
frames = sorted(list(set(result_frame_dict.keys())))
for frame_id in frames:
trk_objs = result_frame_dict.get(frame_id, [])
trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
return self.acc
@staticmethod
def get_summary(accs,
names,
metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1',
'precision', 'recall')):
names = copy.deepcopy(names)
if metrics is None:
metrics = mm.metrics.motchallenge_metrics
metrics = copy.deepcopy(metrics)
mh = mm.metrics.create()
summary = mh.compute_many(
accs, metrics=metrics, names=names, generate_overall=True)
return summary
@staticmethod
def save_summary(summary, filename):
import pandas as pd
writer = pd.ExcelWriter(filename)
summary.to_excel(writer)
writer.save()
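# Minimal usage sketch (illustration only; mirrors how MOTMetric below drives this class,
# with placeholder paths and sequence names):
#
#   evaluator = MOTEvaluator(data_root='dataset/mot/MOT16/images/train',
#                            seq_name='MOT16-02', data_type='mot')
#   acc = evaluator.eval_file('output/mot_results/MOT16-02.txt')
#   summary = MOTEvaluator.get_summary([acc], ['MOT16-02'])
#   print(mm.io.render_summary(summary, namemap=mm.io.motchallenge_metric_names))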
class MOTMetric(Metric):
def __init__(self, save_summary=False):
self.save_summary = save_summary
self.MOTEvaluator = MOTEvaluator
self.result_root = None
self.reset()
def reset(self):
self.accs = []
self.seqs = []
def update(self, data_root, seq, data_type, result_root, result_filename):
evaluator = self.MOTEvaluator(data_root, seq, data_type)
self.accs.append(evaluator.eval_file(result_filename))
self.seqs.append(seq)
self.result_root = result_root
def accumulate(self):
metrics = mm.metrics.motchallenge_metrics
mh = mm.metrics.create()
summary = self.MOTEvaluator.get_summary(self.accs, self.seqs, metrics)
self.strsummary = mm.io.render_summary(
summary,
formatters=mh.formatters,
namemap=mm.io.motchallenge_metric_names)
if self.save_summary:
self.MOTEvaluator.save_summary(
summary, os.path.join(self.result_root, 'summary.xlsx'))
def log(self):
print(self.strsummary)
def get_results(self):
return self.strsummary
class JDEDetMetric(Metric):
# Note this detection AP metric is different from COCOMetric or VOCMetric,
# and the bboxes coordinates are not scaled to the original image
def __init__(self, overlap_thresh=0.5):
self.overlap_thresh = overlap_thresh
self.reset()
def reset(self):
self.AP_accum = np.zeros(1)
self.AP_accum_count = np.zeros(1)
def update(self, inputs, outputs):
bboxes = outputs['bbox'][:, 2:].numpy()
scores = outputs['bbox'][:, 1].numpy()
labels = outputs['bbox'][:, 0].numpy()
bbox_lengths = outputs['bbox_num'].numpy()
if bboxes.shape[0] == 1 and bboxes.sum() == 0.0:
return
gt_boxes = inputs['gt_bbox'].numpy()[0]
gt_labels = inputs['gt_class'].numpy()[0]
if gt_labels.shape[0] == 0:
return
correct = []
detected = []
for i in range(bboxes.shape[0]):
obj_pred = 0
pred_bbox = bboxes[i].reshape(1, 4)
# Compute iou with target boxes
iou = bbox_iou_np_expand(pred_bbox, gt_boxes, x1y1x2y2=True)[0]
# Extract index of largest overlap
best_i = np.argmax(iou)
# If overlap exceeds threshold and classification is correct mark as correct
if iou[best_i] > self.overlap_thresh and obj_pred == gt_labels[
best_i] and best_i not in detected:
correct.append(1)
detected.append(best_i)
else:
correct.append(0)
# Compute Average Precision (AP) per class
target_cls = list(gt_labels.T[0])
AP, AP_class, R, P = ap_per_class(
tp=correct,
conf=scores,
pred_cls=np.zeros_like(scores),
target_cls=target_cls)
self.AP_accum_count += np.bincount(AP_class, minlength=1)
self.AP_accum += np.bincount(AP_class, minlength=1, weights=AP)
def accumulate(self):
logger.info("Accumulating evaluatation results...")
self.map_stat = self.AP_accum[0] / (self.AP_accum_count[0] + 1E-16)
def log(self):
map_stat = 100. * self.map_stat
logger.info("mAP({:.2f}) = {:.2f}%".format(self.overlap_thresh,
map_stat))
def get_results(self):
return self.map_stat
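# Worked example of the accumulation above (numbers are fabricated): with a single class,
# per-update AP values of 0.8, 0.6 and 0.7 give AP_accum[0] = 2.1 and
# AP_accum_count[0] = 3, so map_stat = 2.1 / (3 + 1e-16) = 0.70, logged as 70.00%.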
"""
The following code is borrowed from https://github.com/xingyizhou/CenterTrack/blob/master/src/tools/eval_kitti_track/evaluate_tracking.py
"""
class tData:
"""
Utility class to load data.
"""
def __init__(self,frame=-1,obj_type="unset",truncation=-1,occlusion=-1,\
obs_angle=-10,x1=-1,y1=-1,x2=-1,y2=-1,w=-1,h=-1,l=-1,\
X=-1000,Y=-1000,Z=-1000,yaw=-10,score=-1000,track_id=-1):
"""
Constructor, initializes the object given the parameters.
"""
self.frame = frame
self.track_id = track_id
self.obj_type = obj_type
self.truncation = truncation
self.occlusion = occlusion
self.obs_angle = obs_angle
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.w = w
self.h = h
self.l = l
self.X = X
self.Y = Y
self.Z = Z
self.yaw = yaw
self.score = score
self.ignored = False
self.valid = False
self.tracker = -1
def __str__(self):
attrs = vars(self)
return '\n'.join("%s: %s" % item for item in attrs.items())
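# Illustration only (fabricated values): one KITTI tracking label line and the tData
# fields _loadData() below fills from it, in order:
#   frame track_id type trunc occ alpha x1 y1 x2 y2 h w l X Y Z yaw [score]
#   "0 2 Car 0 0 -1.57 599.4 156.4 629.7 189.2 1.5 1.6 3.7 2.5 1.5 20.1 -1.6"
# Tracker result files may append an 18th field, the detection score; ground truth
# files stop at the 17 fields above.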
class KITTIEvaluation(object):
""" KITTI tracking statistics (CLEAR MOT, id-switches, fragments, ML/PT/MT, precision/recall)
MOTA - Multi-object tracking accuracy in [0,100]
MOTP - Multi-object tracking precision in [0,100] (3D) / [td,100] (2D)
MOTAL - Multi-object tracking accuracy in [0,100] with log10(id-switches)
id-switches - number of id switches
fragments - number of fragmentations
MT, PT, ML - number of mostly tracked, partially tracked and mostly lost trajectories
recall - recall = percentage of detected targets
precision - precision = percentage of correctly detected targets
FAR - number of false alarms per frame
falsepositives - number of false positives (FP)
missed - number of missed targets (FN)
"""
def __init__(self, result_path, gt_path, min_overlap=0.5, max_truncation = 0,\
min_height = 25, max_occlusion = 2, cls="car",\
n_frames=[], seqs=[], n_sequences=0):
# get number of sequences and
# get number of frames per sequence from test mapping
# (created while extracting the benchmark)
self.gt_path = os.path.join(gt_path, "../labels")
self.n_frames = n_frames
self.sequence_name = seqs
self.n_sequences = n_sequences
self.cls = cls # class to evaluate, i.e. pedestrian or car
self.result_path = result_path
# statistics and numbers for evaluation
self.n_gt = 0 # number of ground truth detections minus ignored false negatives and true positives
self.n_igt = 0 # number of ignored ground truth detections
self.n_gts = [
] # number of ground truth detections minus ignored false negatives and true positives PER SEQUENCE
self.n_igts = [
        ] # number of ignored ground truth detections PER SEQUENCE
self.n_gt_trajectories = 0
self.n_gt_seq = []
self.n_tr = 0 # number of tracker detections minus ignored tracker detections
self.n_trs = [
] # number of tracker detections minus ignored tracker detections PER SEQUENCE
self.n_itr = 0 # number of ignored tracker detections
self.n_itrs = [] # number of ignored tracker detections PER SEQUENCE
self.n_igttr = 0 # number of ignored ground truth detections where the corresponding associated tracker detection is also ignored
self.n_tr_trajectories = 0
self.n_tr_seq = []
self.MOTA = 0
self.MOTP = 0
self.MOTAL = 0
self.MODA = 0
self.MODP = 0
self.MODP_t = []
self.recall = 0
self.precision = 0
self.F1 = 0
self.FAR = 0
self.total_cost = 0
self.itp = 0 # number of ignored true positives
self.itps = [] # number of ignored true positives PER SEQUENCE
self.tp = 0 # number of true positives including ignored true positives!
self.tps = [
] # number of true positives including ignored true positives PER SEQUENCE
self.fn = 0 # number of false negatives WITHOUT ignored false negatives
self.fns = [
] # number of false negatives WITHOUT ignored false negatives PER SEQUENCE
self.ifn = 0 # number of ignored false negatives
self.ifns = [] # number of ignored false negatives PER SEQUENCE
self.fp = 0 # number of false positives
# a bit tricky, the number of ignored false negatives and ignored true positives
# is subtracted, but if both tracker detection and ground truth detection
# are ignored this number is added again to avoid double counting
self.fps = [] # above PER SEQUENCE
self.mme = 0
self.fragments = 0
self.id_switches = 0
self.MT = 0
self.PT = 0
self.ML = 0
self.min_overlap = min_overlap # minimum bounding box overlap for 3rd party metrics
self.max_truncation = max_truncation # maximum truncation of an object for evaluation
self.max_occlusion = max_occlusion # maximum occlusion of an object for evaluation
self.min_height = min_height # minimum height of an object for evaluation
self.n_sample_points = 500
# this should be enough to hold all groundtruth trajectories
# is expanded if necessary and reduced in any case
self.gt_trajectories = [[] for x in range(self.n_sequences)]
self.ign_trajectories = [[] for x in range(self.n_sequences)]
def loadGroundtruth(self):
try:
self._loadData(self.gt_path, cls=self.cls, loading_groundtruth=True)
except IOError:
return False
return True
def loadTracker(self):
try:
if not self._loadData(
self.result_path, cls=self.cls, loading_groundtruth=False):
return False
except IOError:
return False
return True
def _loadData(self,
root_dir,
cls,
min_score=-1000,
loading_groundtruth=False):
"""
Generic loader for ground truth and tracking data.
Use loadGroundtruth() or loadTracker() to load this data.
Loads detections in KITTI format from textfiles.
"""
# construct objectDetections object to hold detection data
t_data = tData()
data = []
eval_2d = True
eval_3d = True
seq_data = []
n_trajectories = 0
n_trajectories_seq = []
for seq, s_name in enumerate(self.sequence_name):
i = 0
filename = os.path.join(root_dir, "%s.txt" % s_name)
f = open(filename, "r")
f_data = [
[] for x in range(self.n_frames[seq])
] # current set has only 1059 entries, sufficient length is checked anyway
ids = []
n_in_seq = 0
id_frame_cache = []
for line in f:
# KITTI tracking benchmark data format:
# (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry)
line = line.strip()
fields = line.split(" ")
# classes that should be loaded (ignored neighboring classes)
if "car" in cls.lower():
classes = ["car", "van"]
elif "pedestrian" in cls.lower():
classes = ["pedestrian", "person_sitting"]
else:
classes = [cls.lower()]
classes += ["dontcare"]
if not any([s for s in classes if s in fields[2].lower()]):
continue
# get fields from table
t_data.frame = int(float(fields[0])) # frame
t_data.track_id = int(float(fields[1])) # id
t_data.obj_type = fields[
2].lower() # object type [car, pedestrian, cyclist, ...]
t_data.truncation = int(
float(fields[3])) # truncation [-1,0,1,2]
t_data.occlusion = int(
float(fields[4])) # occlusion [-1,0,1,2]
t_data.obs_angle = float(fields[5]) # observation angle [rad]
t_data.x1 = float(fields[6]) # left [px]
t_data.y1 = float(fields[7]) # top [px]
t_data.x2 = float(fields[8]) # right [px]
t_data.y2 = float(fields[9]) # bottom [px]
t_data.h = float(fields[10]) # height [m]
t_data.w = float(fields[11]) # width [m]
t_data.l = float(fields[12]) # length [m]
t_data.X = float(fields[13]) # X [m]
t_data.Y = float(fields[14]) # Y [m]
t_data.Z = float(fields[15]) # Z [m]
t_data.yaw = float(fields[16]) # yaw angle [rad]
if not loading_groundtruth:
if len(fields) == 17:
t_data.score = -1
elif len(fields) == 18:
t_data.score = float(fields[17]) # detection score
else:
logger.info("file is not in KITTI format")
return
# do not consider objects marked as invalid
                if t_data.track_id == -1 and t_data.obj_type != "dontcare":
continue
idx = t_data.frame
# check if length for frame data is sufficient
if idx >= len(f_data):
print("extend f_data", idx, len(f_data))
f_data += [[] for x in range(max(500, idx - len(f_data)))]
try:
id_frame = (t_data.frame, t_data.track_id)
if id_frame in id_frame_cache and not loading_groundtruth:
logger.info(
"track ids are not unique for sequence %d: frame %d"
% (seq, t_data.frame))
logger.info(
"track id %d occurred at least twice for this frame"
% t_data.track_id)
logger.info("Exiting...")
#continue # this allows to evaluate non-unique result files
return False
id_frame_cache.append(id_frame)
f_data[t_data.frame].append(copy.copy(t_data))
except:
print(len(f_data), idx)
raise
if t_data.track_id not in ids and t_data.obj_type != "dontcare":
ids.append(t_data.track_id)
n_trajectories += 1
n_in_seq += 1
# check if uploaded data provides information for 2D and 3D evaluation
if not loading_groundtruth and eval_2d is True and (
t_data.x1 == -1 or t_data.x2 == -1 or t_data.y1 == -1 or
t_data.y2 == -1):
eval_2d = False
if not loading_groundtruth and eval_3d is True and (
t_data.X == -1000 or t_data.Y == -1000 or
t_data.Z == -1000):
eval_3d = False
# only add existing frames
n_trajectories_seq.append(n_in_seq)
seq_data.append(f_data)
f.close()
if not loading_groundtruth:
self.tracker = seq_data
self.n_tr_trajectories = n_trajectories
self.eval_2d = eval_2d
self.eval_3d = eval_3d
self.n_tr_seq = n_trajectories_seq
if self.n_tr_trajectories == 0:
return False
else:
# split ground truth and DontCare areas
self.dcareas = []
self.groundtruth = []
for seq_idx in range(len(seq_data)):
seq_gt = seq_data[seq_idx]
s_g, s_dc = [], []
for f in range(len(seq_gt)):
all_gt = seq_gt[f]
g, dc = [], []
for gg in all_gt:
if gg.obj_type == "dontcare":
dc.append(gg)
else:
g.append(gg)
s_g.append(g)
s_dc.append(dc)
self.dcareas.append(s_dc)
self.groundtruth.append(s_g)
self.n_gt_seq = n_trajectories_seq
self.n_gt_trajectories = n_trajectories
return True
def boxoverlap(self, a, b, criterion="union"):
"""
boxoverlap computes intersection over union for bbox a and b in KITTI format.
If the criterion is 'union', overlap = (a inter b) / a union b).
If the criterion is 'a', overlap = (a inter b) / a, where b should be a dontcare area.
"""
x1 = max(a.x1, b.x1)
y1 = max(a.y1, b.y1)
x2 = min(a.x2, b.x2)
y2 = min(a.y2, b.y2)
w = x2 - x1
h = y2 - y1
if w <= 0. or h <= 0.:
return 0.
inter = w * h
aarea = (a.x2 - a.x1) * (a.y2 - a.y1)
barea = (b.x2 - b.x1) * (b.y2 - b.y1)
# intersection over union overlap
if criterion.lower() == "union":
o = inter / float(aarea + barea - inter)
elif criterion.lower() == "a":
o = float(inter) / float(aarea)
else:
raise TypeError("Unkown type for criterion")
return o
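    # Worked example (illustration only): boxes a = (0, 0, 10, 10) and b = (5, 0, 15, 10)
    # intersect in a 5x10 region, so with criterion "union" the overlap is
    # 50 / (100 + 100 - 50) = 1/3, while with criterion "a" (b being a DontCare area)
    # it is 50 / 100 = 0.5.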
def compute3rdPartyMetrics(self):
"""
Computes the metrics defined in
- Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics
MOTA, MOTAL, MOTP
- Nevatia 2008: Global Data Association for Multi-Object Tracking Using Network Flows
MT/PT/ML
"""
# construct Munkres object for Hungarian Method association
hm = Munkres()
max_cost = 1e9
# go through all frames and associate ground truth and tracker results
# groundtruth and tracker contain lists for every single frame containing lists of KITTI format detections
fr, ids = 0, 0
for seq_idx in range(len(self.groundtruth)):
seq_gt = self.groundtruth[seq_idx]
seq_dc = self.dcareas[seq_idx] # don't care areas
seq_tracker = self.tracker[seq_idx]
seq_trajectories = defaultdict(list)
seq_ignored = defaultdict(list)
# statistics over the current sequence, check the corresponding
# variable comments in __init__ to get their meaning
seqtp = 0
seqitp = 0
seqfn = 0
seqifn = 0
seqfp = 0
seqigt = 0
seqitr = 0
last_ids = [[], []]
n_gts = 0
n_trs = 0
for f in range(len(seq_gt)):
g = seq_gt[f]
dc = seq_dc[f]
t = seq_tracker[f]
# counting total number of ground truth and tracker objects
self.n_gt += len(g)
self.n_tr += len(t)
n_gts += len(g)
n_trs += len(t)
# use hungarian method to associate, using boxoverlap 0..1 as cost
# build cost matrix
cost_matrix = []
this_ids = [[], []]
for gg in g:
# save current ids
this_ids[0].append(gg.track_id)
this_ids[1].append(-1)
gg.tracker = -1
gg.id_switch = 0
gg.fragmentation = 0
cost_row = []
for tt in t:
# overlap == 1 is cost ==0
c = 1 - self.boxoverlap(gg, tt)
# gating for boxoverlap
if c <= self.min_overlap:
cost_row.append(c)
else:
cost_row.append(max_cost) # = 1e9
cost_matrix.append(cost_row)
# all ground truth trajectories are initially not associated
# extend groundtruth trajectories lists (merge lists)
seq_trajectories[gg.track_id].append(-1)
seq_ignored[gg.track_id].append(False)
                if len(g) == 0:
cost_matrix = [[]]
# associate
association_matrix = hm.compute(cost_matrix)
# tmp variables for sanity checks and MODP computation
tmptp = 0
tmpfp = 0
tmpfn = 0
tmpc = 0 # this will sum up the overlaps for all true positives
tmpcs = [0] * len(
g) # this will save the overlaps for all true positives
# the reason is that some true positives might be ignored
                # later such that the corresponding overlaps can
# be subtracted from tmpc for MODP computation
# mapping for tracker ids and ground truth ids
for row, col in association_matrix:
# apply gating on boxoverlap
c = cost_matrix[row][col]
if c < max_cost:
g[row].tracker = t[col].track_id
this_ids[1][row] = t[col].track_id
t[col].valid = True
g[row].distance = c
self.total_cost += 1 - c
tmpc += 1 - c
tmpcs[row] = 1 - c
seq_trajectories[g[row].track_id][-1] = t[col].track_id
# true positives are only valid associations
self.tp += 1
tmptp += 1
else:
g[row].tracker = -1
self.fn += 1
tmpfn += 1
# associate tracker and DontCare areas
# ignore tracker in neighboring classes
nignoredtracker = 0 # number of ignored tracker detections
ignoredtrackers = dict() # will associate the track_id with -1
# if it is not ignored and 1 if it is
# ignored;
# this is used to avoid double counting ignored
# cases, see the next loop
for tt in t:
ignoredtrackers[tt.track_id] = -1
# ignore detection if it belongs to a neighboring class or is
# smaller or equal to the minimum height
tt_height = abs(tt.y1 - tt.y2)
if ((self.cls == "car" and tt.obj_type == "van") or
(self.cls == "pedestrian" and
tt.obj_type == "person_sitting") or
tt_height <= self.min_height) and not tt.valid:
nignoredtracker += 1
tt.ignored = True
ignoredtrackers[tt.track_id] = 1
continue
for d in dc:
overlap = self.boxoverlap(tt, d, "a")
if overlap > 0.5 and not tt.valid:
tt.ignored = True
nignoredtracker += 1
ignoredtrackers[tt.track_id] = 1
break
# check for ignored FN/TP (truncation or neighboring object class)
ignoredfn = 0 # the number of ignored false negatives
nignoredtp = 0 # the number of ignored true positives
nignoredpairs = 0 # the number of ignored pairs, i.e. a true positive
# which is ignored but where the associated tracker
# detection has already been ignored
gi = 0
for gg in g:
if gg.tracker < 0:
if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
seq_ignored[gg.track_id][-1] = True
gg.ignored = True
ignoredfn += 1
elif gg.tracker >= 0:
if gg.occlusion>self.max_occlusion or gg.truncation>self.max_truncation\
or (self.cls=="car" and gg.obj_type=="van") or (self.cls=="pedestrian" and gg.obj_type=="person_sitting"):
seq_ignored[gg.track_id][-1] = True
gg.ignored = True
nignoredtp += 1
# if the associated tracker detection is already ignored,
# we want to avoid double counting ignored detections
if ignoredtrackers[gg.tracker] > 0:
nignoredpairs += 1
# for computing MODP, the overlaps from ignored detections
# are subtracted
tmpc -= tmpcs[gi]
gi += 1
                # the below might be confusing, check the comments in __init__
# to see what the individual statistics represent
# correct TP by number of ignored TP due to truncation
# ignored TP are shown as tracked in visualization
tmptp -= nignoredtp
# count the number of ignored true positives
self.itp += nignoredtp
# adjust the number of ground truth objects considered
self.n_gt -= (ignoredfn + nignoredtp)
# count the number of ignored ground truth objects
self.n_igt += ignoredfn + nignoredtp
# count the number of ignored tracker objects
self.n_itr += nignoredtracker
# count the number of ignored pairs, i.e. associated tracker and
# ground truth objects that are both ignored
self.n_igttr += nignoredpairs
                # false negatives = associated gt bboxes exceeding association threshold + non-associated gt bboxes
tmpfn += len(g) - len(association_matrix) - ignoredfn
self.fn += len(g) - len(association_matrix) - ignoredfn
self.ifn += ignoredfn
# false positives = tracker bboxes - associated tracker bboxes
# mismatches (mme_t)
tmpfp += len(
t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
self.fp += len(
t) - tmptp - nignoredtracker - nignoredtp + nignoredpairs
# update sequence data
seqtp += tmptp
seqitp += nignoredtp
seqfp += tmpfp
seqfn += tmpfn
seqifn += ignoredfn
seqigt += ignoredfn + nignoredtp
seqitr += nignoredtracker
# sanity checks
            # - the number of true positives minus ignored true positives
# should be greater or equal to 0
# - the number of false negatives should be greater or equal to 0
# - the number of false positives needs to be greater or equal to 0
# otherwise ignored detections might be counted double
# - the number of counted true positives (plus ignored ones)
# and the number of counted false negatives (plus ignored ones)
# should match the total number of ground truth objects
# - the number of counted true positives (plus ignored ones)
# and the number of counted false positives
# plus the number of ignored tracker detections should
# match the total number of tracker detections; note that
# nignoredpairs is subtracted here to avoid double counting
            # of ignored detections in nignoredtp and nignoredtracker
if tmptp < 0:
print(tmptp, nignoredtp)
raise NameError("Something went wrong! TP is negative")
if tmpfn < 0:
print(tmpfn,
len(g),
len(association_matrix), ignoredfn, nignoredpairs)
raise NameError("Something went wrong! FN is negative")
if tmpfp < 0:
print(tmpfp,
len(t), tmptp, nignoredtracker, nignoredtp,
nignoredpairs)
raise NameError("Something went wrong! FP is negative")
                if tmptp + tmpfn != len(g) - ignoredfn - nignoredtp:
print("seqidx", seq_idx)
print("frame ", f)
print("TP ", tmptp)
print("FN ", tmpfn)
print("FP ", tmpfp)
print("nGT ", len(g))
print("nAss ", len(association_matrix))
print("ign GT", ignoredfn)
print("ign TP", nignoredtp)
raise NameError(
"Something went wrong! nGroundtruth is not TP+FN")
                if tmptp + tmpfp + nignoredtp + nignoredtracker - nignoredpairs != len(
                        t):
print(seq_idx, f, len(t), tmptp, tmpfp)
print(len(association_matrix), association_matrix)
raise NameError(
"Something went wrong! nTracker is not TP+FP")
# check for id switches or fragmentations
for i, tt in enumerate(this_ids[0]):
if tt in last_ids[0]:
idx = last_ids[0].index(tt)
tid = this_ids[1][i]
lid = last_ids[1][idx]
if tid != lid and lid != -1 and tid != -1:
if g[i].truncation < self.max_truncation:
g[i].id_switch = 1
ids += 1
if tid != lid and lid != -1:
if g[i].truncation < self.max_truncation:
g[i].fragmentation = 1
fr += 1
# save current index
last_ids = this_ids
# compute MOTP_t
MODP_t = 1
if tmptp != 0:
MODP_t = tmpc / float(tmptp)
self.MODP_t.append(MODP_t)
# remove empty lists for current gt trajectories
self.gt_trajectories[seq_idx] = seq_trajectories
self.ign_trajectories[seq_idx] = seq_ignored
# gather statistics for "per sequence" statistics.
self.n_gts.append(n_gts)
self.n_trs.append(n_trs)
self.tps.append(seqtp)
self.itps.append(seqitp)
self.fps.append(seqfp)
self.fns.append(seqfn)
self.ifns.append(seqifn)
self.n_igts.append(seqigt)
self.n_itrs.append(seqitr)
# compute MT/PT/ML, fragments, idswitches for all groundtruth trajectories
n_ignored_tr_total = 0
for seq_idx, (
seq_trajectories, seq_ignored
) in enumerate(zip(self.gt_trajectories, self.ign_trajectories)):
if len(seq_trajectories) == 0:
continue
tmpMT, tmpML, tmpPT, tmpId_switches, tmpFragments = [0] * 5
n_ignored_tr = 0
for g, ign_g in zip(seq_trajectories.values(),
seq_ignored.values()):
# all frames of this gt trajectory are ignored
if all(ign_g):
n_ignored_tr += 1
n_ignored_tr_total += 1
continue
# all frames of this gt trajectory are not assigned to any detections
if all([this == -1 for this in g]):
tmpML += 1
self.ML += 1
continue
# compute tracked frames in trajectory
last_id = g[0]
# first detection (necessary to be in gt_trajectories) is always tracked
tracked = 1 if g[0] >= 0 else 0
lgt = 0 if ign_g[0] else 1
for f in range(1, len(g)):
if ign_g[f]:
last_id = -1
continue
lgt += 1
if last_id != g[f] and last_id != -1 and g[f] != -1 and g[
f - 1] != -1:
tmpId_switches += 1
self.id_switches += 1
if f < len(g) - 1 and g[f - 1] != g[
f] and last_id != -1 and g[f] != -1 and g[f +
1] != -1:
tmpFragments += 1
self.fragments += 1
if g[f] != -1:
tracked += 1
last_id = g[f]
# handle last frame; tracked state is handled in for loop (g[f]!=-1)
if len(g) > 1 and g[f - 1] != g[f] and last_id != -1 and g[
f] != -1 and not ign_g[f]:
tmpFragments += 1
self.fragments += 1
# compute MT/PT/ML
tracking_ratio = tracked / float(len(g) - sum(ign_g))
if tracking_ratio > 0.8:
tmpMT += 1
self.MT += 1
elif tracking_ratio < 0.2:
tmpML += 1
self.ML += 1
else: # 0.2 <= tracking_ratio <= 0.8
tmpPT += 1
self.PT += 1
if (self.n_gt_trajectories - n_ignored_tr_total) == 0:
self.MT = 0.
self.PT = 0.
self.ML = 0.
else:
self.MT /= float(self.n_gt_trajectories - n_ignored_tr_total)
self.PT /= float(self.n_gt_trajectories - n_ignored_tr_total)
self.ML /= float(self.n_gt_trajectories - n_ignored_tr_total)
# precision/recall etc.
if (self.fp + self.tp) == 0 or (self.tp + self.fn) == 0:
self.recall = 0.
self.precision = 0.
else:
self.recall = self.tp / float(self.tp + self.fn)
self.precision = self.tp / float(self.fp + self.tp)
if (self.recall + self.precision) == 0:
self.F1 = 0.
else:
self.F1 = 2. * (self.precision * self.recall) / (
self.precision + self.recall)
if sum(self.n_frames) == 0:
self.FAR = "n/a"
else:
self.FAR = self.fp / float(sum(self.n_frames))
# compute CLEARMOT
if self.n_gt == 0:
self.MOTA = -float("inf")
self.MODA = -float("inf")
else:
self.MOTA = 1 - (self.fn + self.fp + self.id_switches
) / float(self.n_gt)
self.MODA = 1 - (self.fn + self.fp) / float(self.n_gt)
if self.tp == 0:
self.MOTP = float("inf")
else:
self.MOTP = self.total_cost / float(self.tp)
if self.n_gt != 0:
if self.id_switches == 0:
self.MOTAL = 1 - (self.fn + self.fp + self.id_switches
) / float(self.n_gt)
else:
self.MOTAL = 1 - (self.fn + self.fp +
math.log10(self.id_switches)
) / float(self.n_gt)
else:
self.MOTAL = -float("inf")
if sum(self.n_frames) == 0:
self.MODP = "n/a"
else:
self.MODP = sum(self.MODP_t) / float(sum(self.n_frames))
return True
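    # Worked example of the CLEAR MOT formulas above (fabricated counts): with
    # n_gt = 100, fn = 10, fp = 5 and id_switches = 2,
    #   MOTA = 1 - (10 + 5 + 2) / 100 = 0.83 and MODA = 1 - (10 + 5) / 100 = 0.85,
    # while MOTP is the mean matched overlap, total_cost / tp.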
def createSummary(self):
summary = ""
summary += "tracking evaluation summary".center(80, "=") + "\n"
summary += self.printEntry("Multiple Object Tracking Accuracy (MOTA)",
self.MOTA) + "\n"
summary += self.printEntry("Multiple Object Tracking Precision (MOTP)",
self.MOTP) + "\n"
summary += self.printEntry("Multiple Object Tracking Accuracy (MOTAL)",
self.MOTAL) + "\n"
summary += self.printEntry("Multiple Object Detection Accuracy (MODA)",
self.MODA) + "\n"
summary += self.printEntry("Multiple Object Detection Precision (MODP)",
self.MODP) + "\n"
summary += "\n"
summary += self.printEntry("Recall", self.recall) + "\n"
summary += self.printEntry("Precision", self.precision) + "\n"
summary += self.printEntry("F1", self.F1) + "\n"
summary += self.printEntry("False Alarm Rate", self.FAR) + "\n"
summary += "\n"
summary += self.printEntry("Mostly Tracked", self.MT) + "\n"
summary += self.printEntry("Partly Tracked", self.PT) + "\n"
summary += self.printEntry("Mostly Lost", self.ML) + "\n"
summary += "\n"
summary += self.printEntry("True Positives", self.tp) + "\n"
#summary += self.printEntry("True Positives per Sequence", self.tps) + "\n"
summary += self.printEntry("Ignored True Positives", self.itp) + "\n"
#summary += self.printEntry("Ignored True Positives per Sequence", self.itps) + "\n"
summary += self.printEntry("False Positives", self.fp) + "\n"
#summary += self.printEntry("False Positives per Sequence", self.fps) + "\n"
summary += self.printEntry("False Negatives", self.fn) + "\n"
#summary += self.printEntry("False Negatives per Sequence", self.fns) + "\n"
summary += self.printEntry("ID-switches", self.id_switches) + "\n"
self.fp = self.fp / self.n_gt
self.fn = self.fn / self.n_gt
self.id_switches = self.id_switches / self.n_gt
summary += self.printEntry("False Positives Ratio", self.fp) + "\n"
#summary += self.printEntry("False Positives per Sequence", self.fps) + "\n"
summary += self.printEntry("False Negatives Ratio", self.fn) + "\n"
#summary += self.printEntry("False Negatives per Sequence", self.fns) + "\n"
summary += self.printEntry("Ignored False Negatives Ratio",
self.ifn) + "\n"
#summary += self.printEntry("Ignored False Negatives per Sequence", self.ifns) + "\n"
summary += self.printEntry("Missed Targets", self.fn) + "\n"
summary += self.printEntry("ID-switches", self.id_switches) + "\n"
summary += self.printEntry("Fragmentations", self.fragments) + "\n"
summary += "\n"
summary += self.printEntry("Ground Truth Objects (Total)", self.n_gt +
self.n_igt) + "\n"
#summary += self.printEntry("Ground Truth Objects (Total) per Sequence", self.n_gts) + "\n"
summary += self.printEntry("Ignored Ground Truth Objects",
self.n_igt) + "\n"
#summary += self.printEntry("Ignored Ground Truth Objects per Sequence", self.n_igts) + "\n"
summary += self.printEntry("Ground Truth Trajectories",
self.n_gt_trajectories) + "\n"
summary += "\n"
summary += self.printEntry("Tracker Objects (Total)", self.n_tr) + "\n"
#summary += self.printEntry("Tracker Objects (Total) per Sequence", self.n_trs) + "\n"
summary += self.printEntry("Ignored Tracker Objects", self.n_itr) + "\n"
#summary += self.printEntry("Ignored Tracker Objects per Sequence", self.n_itrs) + "\n"
summary += self.printEntry("Tracker Trajectories",
self.n_tr_trajectories) + "\n"
#summary += "\n"
#summary += self.printEntry("Ignored Tracker Objects with Associated Ignored Ground Truth Objects", self.n_igttr) + "\n"
summary += "=" * 80
return summary
def printEntry(self, key, val, width=(70, 10)):
"""
Pretty print an entry in a table fashion.
"""
s_out = key.ljust(width[0])
if type(val) == int:
s = "%%%dd" % width[1]
s_out += s % val
elif type(val) == float:
s = "%%%df" % (width[1])
s_out += s % val
else:
s_out += ("%s" % val).rjust(width[1])
return s_out
def saveToStats(self, save_summary):
"""
Save the statistics in a whitespace separate file.
"""
summary = self.createSummary()
if save_summary:
filename = os.path.join(self.result_path,
"summary_%s.txt" % self.cls)
dump = open(filename, "w+")
dump.write(summary)
dump.close()
return summary
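# Standalone usage sketch (illustration only; mirrors KITTIMOTMetric.accumulate below,
# with placeholder paths and sequence names):
#
#   e = KITTIEvaluation(result_path='output/kitti_results', gt_path='dataset/kitti/image_02',
#                       n_frames=[154], seqs=['0000'], n_sequences=1)
#   if e.loadTracker() and e.loadGroundtruth() and e.compute3rdPartyMetrics():
#       print(e.saveToStats(save_summary=False))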
class KITTIMOTMetric(Metric):
def __init__(self, save_summary=True):
self.save_summary = save_summary
self.MOTEvaluator = KITTIEvaluation
self.result_root = None
self.reset()
def reset(self):
self.seqs = []
self.n_sequences = 0
self.n_frames = []
self.strsummary = ''
def update(self, data_root, seq, data_type, result_root, result_filename):
        assert data_type == 'kitti', "data_type should be 'kitti'"
self.result_root = result_root
self.gt_path = data_root
gt_path = '{}/../labels/{}.txt'.format(data_root, seq)
gt = open(gt_path, "r")
max_frame = 0
for line in gt:
line = line.strip()
line_list = line.split(" ")
if int(line_list[0]) > max_frame:
max_frame = int(line_list[0])
rs = open(result_filename, "r")
for line in rs:
line = line.strip()
line_list = line.split(" ")
if int(line_list[0]) > max_frame:
max_frame = int(line_list[0])
gt.close()
rs.close()
self.n_frames.append(max_frame + 1)
self.seqs.append(seq)
self.n_sequences += 1
def accumulate(self):
logger.info("Processing Result for KITTI Tracking Benchmark")
e = self.MOTEvaluator(result_path=self.result_root, gt_path=self.gt_path,\
n_frames=self.n_frames, seqs=self.seqs, n_sequences=self.n_sequences)
try:
if not e.loadTracker():
return
logger.info("Loading Results - Success")
logger.info("Evaluate Object Class: %s" % c.upper())
except:
logger.info("Caught exception while loading result data.")
if not e.loadGroundtruth():
raise ValueError("Ground truth not found.")
logger.info("Loading Groundtruth - Success")
# sanity checks
        if len(e.groundtruth) != len(e.tracker):
logger.info(
"The uploaded data does not provide results for every sequence.")
return False
logger.info("Loaded %d Sequences." % len(e.groundtruth))
logger.info("Start Evaluation...")
if e.compute3rdPartyMetrics():
self.strsummary = e.saveToStats(self.save_summary)
else:
logger.info(
"There seem to be no true positives or false positives at all in the submitted data."
)
def log(self):
print(self.strsummary)
def get_results(self):
return self.strsummary
| PaddleDetection/ppdet/metrics/mot_metrics.py/0 | {
"file_path": "PaddleDetection/ppdet/metrics/mot_metrics.py",
"repo_id": "PaddleDetection",
"token_count": 27177
} | 67 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import numpy as np
import paddle
from ppdet.core.workspace import register, create
from .meta_arch import BaseArch
from ..keypoint_utils import affine_transform
from ppdet.data.transform.op_helper import gaussian_radius, gaussian2D, draw_umich_gaussian
__all__ = ['CenterTrack']
@register
class CenterTrack(BaseArch):
"""
CenterTrack network, see http://arxiv.org/abs/2004.01177
Args:
detector (object): 'CenterNet' instance
plugin_head (object): 'CenterTrackHead' instance
tracker (object): 'CenterTracker' instance
"""
__category__ = 'architecture'
__shared__ = ['mot_metric']
def __init__(self,
detector='CenterNet',
plugin_head='CenterTrackHead',
tracker='CenterTracker',
mot_metric=False):
super(CenterTrack, self).__init__()
self.detector = detector
self.plugin_head = plugin_head
self.tracker = tracker
self.mot_metric = mot_metric
self.pre_image = None
self.deploy = False
@classmethod
def from_config(cls, cfg, *args, **kwargs):
detector = create(cfg['detector'])
detector_out_shape = detector.neck and detector.neck.out_shape or detector.backbone.out_shape
kwargs = {'input_shape': detector_out_shape}
plugin_head = create(cfg['plugin_head'], **kwargs)
tracker = create(cfg['tracker'])
return {
'detector': detector,
'plugin_head': plugin_head,
'tracker': tracker,
}
def _forward(self):
if self.training:
det_outs = self.detector(self.inputs)
neck_feat = det_outs['neck_feat']
losses = {}
for k, v in det_outs.items():
if 'loss' not in k: continue
losses.update({k: v})
plugin_outs = self.plugin_head(neck_feat, self.inputs)
for k, v in plugin_outs.items():
if 'loss' not in k: continue
losses.update({k: v})
losses['loss'] = det_outs['det_loss'] + plugin_outs['plugin_loss']
return losses
else:
if not self.mot_metric:
# detection, support bs>=1
det_outs = self.detector(self.inputs)
return {
'bbox': det_outs['bbox'],
'bbox_num': det_outs['bbox_num']
}
else:
# MOT, only support bs=1
if not self.deploy:
if self.pre_image is None:
self.pre_image = self.inputs['image']
# initializing tracker for the first frame
self.tracker.init_track([])
self.inputs['pre_image'] = self.pre_image
self.pre_image = self.inputs[
'image'] # Note: update for next image
# render input heatmap from tracker status
pre_hm = self.get_additional_inputs(
self.tracker.tracks, self.inputs, with_hm=True)
self.inputs['pre_hm'] = paddle.to_tensor(pre_hm)
# model inference
det_outs = self.detector(self.inputs)
neck_feat = det_outs['neck_feat']
result = self.plugin_head(
neck_feat, self.inputs, det_outs['bbox'],
det_outs['bbox_inds'], det_outs['topk_clses'],
det_outs['topk_ys'], det_outs['topk_xs'])
if not self.deploy:
# convert the cropped and 4x downsampled output coordinate system
# back to the input image coordinate system
result = self.plugin_head.centertrack_post_process(
result, self.inputs, self.tracker.out_thresh)
return result
def get_pred(self):
return self._forward()
def get_loss(self):
return self._forward()
def reset_tracking(self):
self.tracker.reset()
self.pre_image = None
def get_additional_inputs(self, dets, meta, with_hm=True):
# Render input heatmap from previous trackings.
trans_input = meta['trans_input'][0].numpy()
inp_width, inp_height = int(meta['inp_width'][0]), int(meta[
'inp_height'][0])
input_hm = np.zeros((1, inp_height, inp_width), dtype=np.float32)
for det in dets:
if det['score'] < self.tracker.pre_thresh:
continue
bbox = affine_transform_bbox(det['bbox'], trans_input, inp_width,
inp_height)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if (h > 0 and w > 0):
radius = gaussian_radius(
(math.ceil(h), math.ceil(w)), min_overlap=0.7)
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2],
dtype=np.float32)
ct_int = ct.astype(np.int32)
if with_hm:
input_hm[0] = draw_umich_gaussian(input_hm[0], ct_int,
radius)
if with_hm:
input_hm = input_hm[np.newaxis]
return input_hm
def affine_transform_bbox(bbox, trans, width, height):
bbox = np.array(copy.deepcopy(bbox), dtype=np.float32)
bbox[:2] = affine_transform(bbox[:2], trans)
bbox[2:] = affine_transform(bbox[2:], trans)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, width - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, height - 1)
return bbox
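# Hypothetical demo (illustration only, not part of the original file): with an identity
# 2x3 affine matrix, affine_transform_bbox() above only clips the box to the input
# resolution, e.g. [-5, 10, 700, 300] -> [0, 10, 639, 300] for a 640x320 input.
def _demo_affine_transform_bbox():
    identity = np.array([[1., 0., 0.], [0., 1., 0.]], dtype=np.float32)
    return affine_transform_bbox([-5., 10., 700., 300.], identity, width=640, height=320)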
| PaddleDetection/ppdet/modeling/architectures/centertrack.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/architectures/centertrack.py",
"repo_id": "PaddleDetection",
"token_count": 3231
} | 68 |
from typing import Dict
from collections import OrderedDict
from ppdet.modeling.architectures.meta_arch import BaseArch
class MultiSteamDetector(BaseArch):
def __init__(self,
model: Dict[str, BaseArch],
train_cfg=None,
test_cfg=None):
super(MultiSteamDetector, self).__init__()
self.submodules = list(model.keys())
for k, v in model.items():
setattr(self, k, v)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.inference_on = self.test_cfg.get("inference_on",
self.submodules[0])
self.first_load = True
def forward(self, inputs, return_loss=True, **kwargs):
"""Calls either :func:`forward_train` or :func:`forward_test` depending
on whether ``return_loss`` is ``True``.
Note this setting will change the expected inputs. When
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(inputs, **kwargs)
else:
return self.forward_test(inputs, **kwargs)
def get_loss(self, **kwargs):
# losses = self(**data)
        return self.forward_train(**kwargs)
def model(self, **kwargs) -> BaseArch:
if "submodule" in kwargs:
assert (kwargs["submodule"] in self.submodules
), "Detector does not contain submodule {}".format(kwargs[
"submodule"])
model: BaseArch = getattr(self, kwargs["submodule"])
else:
model: BaseArch = getattr(self, self.inference_on)
return model
def freeze(self, model_ref: str):
assert model_ref in self.submodules
model = getattr(self, model_ref)
model.eval()
for param in model.parameters():
param.stop_gradient = True
def update_ema_model(self, momentum=0.9996):
# print(momentum)
model_dict = self.student.state_dict()
new_dict = OrderedDict()
for key, value in self.teacher.state_dict().items():
if key in model_dict.keys():
new_dict[key] = (model_dict[key] *
(1 - momentum) + value * momentum)
else:
raise Exception("{} is not found in student model".format(key))
self.teacher.set_dict(new_dict)
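# Worked sketch (illustration only, not part of the original file): each call to
# update_ema_model() moves a teacher weight a fraction (1 - momentum) of the way
# towards the corresponding student weight.
def _demo_ema_step(teacher_w=1.0, student_w=0.0, momentum=0.9996):
    return student_w * (1 - momentum) + teacher_w * momentum  # -> 0.9996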
| PaddleDetection/ppdet/modeling/architectures/multi_stream_detector.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/architectures/multi_stream_detector.py",
"repo_id": "PaddleDetection",
"token_count": 1230
} | 69 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ..bbox_utils import iou_similarity, batch_iou_similarity
from ..bbox_utils import bbox_center
from .utils import (check_points_inside_bboxes, compute_max_iou_anchor,
compute_max_iou_gt)
__all__ = ['ATSSAssigner']
@register
class ATSSAssigner(nn.Layer):
"""Bridging the Gap Between Anchor-based and Anchor-free Detection
via Adaptive Training Sample Selection
"""
__shared__ = ['num_classes']
def __init__(self,
topk=9,
num_classes=80,
force_gt_matching=False,
eps=1e-9,
sm_use=False):
super(ATSSAssigner, self).__init__()
self.topk = topk
self.num_classes = num_classes
self.force_gt_matching = force_gt_matching
self.eps = eps
self.sm_use = sm_use
def _gather_topk_pyramid(self, gt2anchor_distances, num_anchors_list,
pad_gt_mask):
gt2anchor_distances_list = paddle.split(
gt2anchor_distances, num_anchors_list, axis=-1)
num_anchors_index = np.cumsum(num_anchors_list).tolist()
num_anchors_index = [0, ] + num_anchors_index[:-1]
is_in_topk_list = []
topk_idxs_list = []
for distances, anchors_index in zip(gt2anchor_distances_list,
num_anchors_index):
num_anchors = distances.shape[-1]
_, topk_idxs = paddle.topk(
distances, self.topk, axis=-1, largest=False)
topk_idxs_list.append(topk_idxs + anchors_index)
is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(
axis=-2).astype(gt2anchor_distances.dtype)
is_in_topk_list.append(is_in_topk * pad_gt_mask)
is_in_topk_list = paddle.concat(is_in_topk_list, axis=-1)
topk_idxs_list = paddle.concat(topk_idxs_list, axis=-1)
return is_in_topk_list, topk_idxs_list
@paddle.no_grad()
def forward(self,
anchor_bboxes,
num_anchors_list,
gt_labels,
gt_bboxes,
pad_gt_mask,
bg_index,
gt_scores=None,
pred_bboxes=None):
r"""This code is based on
https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/atss_assigner.py
The assignment is done in following steps
1. compute iou between all bbox (bbox of all pyramid levels) and gt
2. compute center distance between all bbox and gt
        3. on each pyramid level, for each gt, select k bboxes whose centers
            are closest to the gt center, so we select k*l bboxes in total as
            candidates for each gt
        4. get the corresponding iou for these candidates, and compute the
            mean and std, set mean + std as the iou threshold
        5. select the candidates whose iou is greater than or equal to
            the threshold as positive
6. limit the positive sample's center in gt
7. if an anchor box is assigned to multiple gts, the one with the
highest iou will be selected.
Args:
anchor_bboxes (Tensor, float32): pre-defined anchors, shape(L, 4),
"xmin, xmax, ymin, ymax" format
num_anchors_list (List): num of anchors in each level
gt_labels (Tensor, int64|int32): Label of gt_bboxes, shape(B, n, 1)
gt_bboxes (Tensor, float32): Ground truth bboxes, shape(B, n, 4)
pad_gt_mask (Tensor, float32): 1 means bbox, 0 means no bbox, shape(B, n, 1)
bg_index (int): background index
gt_scores (Tensor|None, float32) Score of gt_bboxes,
shape(B, n, 1), if None, then it will initialize with one_hot label
pred_bboxes (Tensor, float32, optional): predicted bounding boxes, shape(B, L, 4)
Returns:
assigned_labels (Tensor): (B, L)
assigned_bboxes (Tensor): (B, L, 4)
assigned_scores (Tensor): (B, L, C), if pred_bboxes is not None, then output ious
"""
assert gt_labels.ndim == gt_bboxes.ndim and \
gt_bboxes.ndim == 3
num_anchors, _ = anchor_bboxes.shape
batch_size, num_max_boxes, _ = gt_bboxes.shape
# negative batch
if num_max_boxes == 0:
assigned_labels = paddle.full(
[batch_size, num_anchors], bg_index, dtype='int32')
assigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])
assigned_scores = paddle.zeros(
[batch_size, num_anchors, self.num_classes])
return assigned_labels, assigned_bboxes, assigned_scores
# 1. compute iou between gt and anchor bbox, [B, n, L]
ious = iou_similarity(gt_bboxes.reshape([-1, 4]), anchor_bboxes)
ious = ious.reshape([batch_size, -1, num_anchors])
# 2. compute center distance between all anchors and gt, [B, n, L]
gt_centers = bbox_center(gt_bboxes.reshape([-1, 4])).unsqueeze(1)
anchor_centers = bbox_center(anchor_bboxes)
gt2anchor_distances = (gt_centers - anchor_centers.unsqueeze(0)) \
.norm(2, axis=-1).reshape([batch_size, -1, num_anchors])
# 3. on each pyramid level, selecting topk closest candidates
# based on the center distance, [B, n, L]
is_in_topk, topk_idxs = self._gather_topk_pyramid(
gt2anchor_distances, num_anchors_list, pad_gt_mask)
        # 4. get the corresponding iou for these candidates, and compute the
# mean and std, 5. set mean + std as the iou threshold
iou_candidates = ious * is_in_topk
iou_threshold = paddle.index_sample(
iou_candidates.flatten(stop_axis=-2),
topk_idxs.flatten(stop_axis=-2))
iou_threshold = iou_threshold.reshape([batch_size, num_max_boxes, -1])
iou_threshold = iou_threshold.mean(axis=-1, keepdim=True) + \
iou_threshold.std(axis=-1, keepdim=True)
is_in_topk = paddle.where(iou_candidates > iou_threshold, is_in_topk,
paddle.zeros_like(is_in_topk))
# 6. check the positive sample's center in gt, [B, n, L]
if self.sm_use:
is_in_gts = check_points_inside_bboxes(
anchor_centers, gt_bboxes, sm_use=True)
else:
is_in_gts = check_points_inside_bboxes(anchor_centers, gt_bboxes)
# select positive sample, [B, n, L]
mask_positive = is_in_topk * is_in_gts * pad_gt_mask
# 7. if an anchor box is assigned to multiple gts,
# the one with the highest iou will be selected.
mask_positive_sum = mask_positive.sum(axis=-2)
if mask_positive_sum.max() > 1:
mask_multiple_gts = (
mask_positive_sum.unsqueeze(1) > 1).astype('int32').tile(
[1, num_max_boxes, 1]).astype('bool')
if self.sm_use:
is_max_iou = compute_max_iou_anchor(ious * mask_positive)
else:
is_max_iou = compute_max_iou_anchor(ious)
mask_positive = paddle.where(mask_multiple_gts, is_max_iou,
mask_positive)
mask_positive_sum = mask_positive.sum(axis=-2)
# 8. make sure every gt_bbox matches the anchor
if self.force_gt_matching:
is_max_iou = compute_max_iou_gt(ious) * pad_gt_mask
mask_max_iou = (is_max_iou.sum(-2, keepdim=True) == 1).tile(
[1, num_max_boxes, 1])
mask_positive = paddle.where(mask_max_iou, is_max_iou,
mask_positive)
mask_positive_sum = mask_positive.sum(axis=-2)
assigned_gt_index = mask_positive.argmax(axis=-2)
# assigned target
batch_ind = paddle.arange(
end=batch_size, dtype=gt_labels.dtype).unsqueeze(-1)
assigned_gt_index = assigned_gt_index + batch_ind * num_max_boxes
assigned_labels = paddle.gather(
gt_labels.flatten(), assigned_gt_index.flatten(), axis=0)
assigned_labels = assigned_labels.reshape([batch_size, num_anchors])
assigned_labels = paddle.where(
mask_positive_sum > 0, assigned_labels,
paddle.full_like(assigned_labels, bg_index))
assigned_bboxes = paddle.gather(
gt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)
assigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])
assigned_scores = F.one_hot(assigned_labels, self.num_classes + 1)
ind = list(range(self.num_classes + 1))
ind.remove(bg_index)
assigned_scores = paddle.index_select(
assigned_scores, paddle.to_tensor(ind), axis=-1)
if pred_bboxes is not None:
# assigned iou
ious = batch_iou_similarity(gt_bboxes, pred_bboxes) * mask_positive
ious = ious.max(axis=-2).unsqueeze(-1)
assigned_scores *= ious
elif gt_scores is not None:
gather_scores = paddle.gather(
gt_scores.flatten(), assigned_gt_index.flatten(), axis=0)
gather_scores = gather_scores.reshape([batch_size, num_anchors])
gather_scores = paddle.where(mask_positive_sum > 0, gather_scores,
paddle.zeros_like(gather_scores))
assigned_scores *= gather_scores.unsqueeze(-1)
return assigned_labels, assigned_bboxes, assigned_scores
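# Toy sketch (illustration only, not part of the original file) of steps 4-5 in the
# docstring above: the per-gt IoU threshold is the mean plus the standard deviation of
# the topk candidate IoUs (numpy's population std is used here purely for illustration).
def _demo_atss_iou_threshold():
    ious = np.array([0.7, 0.5, 0.4, 0.2])  # fabricated topk candidate IoUs for one gt
    threshold = ious.mean() + ious.std()   # ~0.45 + 0.18 = 0.63
    return ious > threshold                # only the 0.7 candidate survives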
| PaddleDetection/ppdet/modeling/assigners/atss_assigner.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/assigners/atss_assigner.py",
"repo_id": "PaddleDetection",
"token_count": 4970
} | 70 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register, serializable
from ppdet.modeling.initializer import conv_init_
from ..shape_spec import ShapeSpec
__all__ = [
'CSPDarkNet', 'BaseConv', 'DWConv', 'BottleNeck', 'SPPLayer', 'SPPFLayer'
]
class BaseConv(nn.Layer):
def __init__(self,
in_channels,
out_channels,
ksize,
stride,
groups=1,
bias=False,
act="silu"):
super(BaseConv, self).__init__()
self.conv = nn.Conv2D(
in_channels,
out_channels,
kernel_size=ksize,
stride=stride,
padding=(ksize - 1) // 2,
groups=groups,
bias_attr=bias)
self.bn = nn.BatchNorm2D(
out_channels,
weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
self._init_weights()
def _init_weights(self):
conv_init_(self.conv)
def forward(self, x):
        # use 'x * F.sigmoid(x)' in place of the 'silu' activation
x = self.bn(self.conv(x))
y = x * F.sigmoid(x)
return y
class DWConv(nn.Layer):
"""Depthwise Conv"""
def __init__(self,
in_channels,
out_channels,
ksize,
stride=1,
bias=False,
act="silu"):
super(DWConv, self).__init__()
self.dw_conv = BaseConv(
in_channels,
in_channels,
ksize=ksize,
stride=stride,
groups=in_channels,
bias=bias,
act=act)
self.pw_conv = BaseConv(
in_channels,
out_channels,
ksize=1,
stride=1,
groups=1,
bias=bias,
act=act)
def forward(self, x):
return self.pw_conv(self.dw_conv(x))
class Focus(nn.Layer):
"""Focus width and height information into channel space, used in YOLOX."""
def __init__(self,
in_channels,
out_channels,
ksize=3,
stride=1,
bias=False,
act="silu"):
super(Focus, self).__init__()
self.conv = BaseConv(
in_channels * 4,
out_channels,
ksize=ksize,
stride=stride,
bias=bias,
act=act)
def forward(self, inputs):
        # inputs [bs, C, H, W] -> outputs [bs, 4C, H/2, W/2]
top_left = inputs[:, :, 0::2, 0::2]
top_right = inputs[:, :, 0::2, 1::2]
bottom_left = inputs[:, :, 1::2, 0::2]
bottom_right = inputs[:, :, 1::2, 1::2]
outputs = paddle.concat(
[top_left, bottom_left, top_right, bottom_right], 1)
return self.conv(outputs)
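# Shape sketch for Focus (illustration only): an input of [1, 3, 640, 640] is rearranged
# by the four stride-2 slices above into [1, 12, 320, 320], and BaseConv then maps the
# 12 channels to `out_channels`, so the resolution is halved without discarding pixels.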
class BottleNeck(nn.Layer):
def __init__(self,
in_channels,
out_channels,
shortcut=True,
expansion=0.5,
depthwise=False,
bias=False,
act="silu"):
super(BottleNeck, self).__init__()
hidden_channels = int(out_channels * expansion)
Conv = DWConv if depthwise else BaseConv
self.conv1 = BaseConv(
in_channels, hidden_channels, ksize=1, stride=1, bias=bias, act=act)
self.conv2 = Conv(
hidden_channels,
out_channels,
ksize=3,
stride=1,
bias=bias,
act=act)
self.add_shortcut = shortcut and in_channels == out_channels
def forward(self, x):
y = self.conv2(self.conv1(x))
if self.add_shortcut:
y = y + x
return y
class SPPLayer(nn.Layer):
"""Spatial Pyramid Pooling (SPP) layer used in YOLOv3-SPP and YOLOX"""
def __init__(self,
in_channels,
out_channels,
kernel_sizes=(5, 9, 13),
bias=False,
act="silu"):
super(SPPLayer, self).__init__()
hidden_channels = in_channels // 2
self.conv1 = BaseConv(
in_channels, hidden_channels, ksize=1, stride=1, bias=bias, act=act)
self.maxpoolings = nn.LayerList([
nn.MaxPool2D(
kernel_size=ks, stride=1, padding=ks // 2)
for ks in kernel_sizes
])
conv2_channels = hidden_channels * (len(kernel_sizes) + 1)
self.conv2 = BaseConv(
conv2_channels, out_channels, ksize=1, stride=1, bias=bias, act=act)
def forward(self, x):
x = self.conv1(x)
x = paddle.concat([x] + [mp(x) for mp in self.maxpoolings], axis=1)
x = self.conv2(x)
return x
class SPPFLayer(nn.Layer):
""" Spatial Pyramid Pooling - Fast (SPPF) layer used in YOLOv5 by Glenn Jocher,
equivalent to SPP(k=(5, 9, 13))
"""
def __init__(self,
in_channels,
out_channels,
ksize=5,
bias=False,
act='silu'):
super(SPPFLayer, self).__init__()
hidden_channels = in_channels // 2
self.conv1 = BaseConv(
in_channels, hidden_channels, ksize=1, stride=1, bias=bias, act=act)
self.maxpooling = nn.MaxPool2D(
kernel_size=ksize, stride=1, padding=ksize // 2)
conv2_channels = hidden_channels * 4
self.conv2 = BaseConv(
conv2_channels, out_channels, ksize=1, stride=1, bias=bias, act=act)
def forward(self, x):
x = self.conv1(x)
y1 = self.maxpooling(x)
y2 = self.maxpooling(y1)
y3 = self.maxpooling(y2)
concats = paddle.concat([x, y1, y2, y3], axis=1)
out = self.conv2(concats)
return out
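# Hedged sketch (not part of the original file): a quick numerical check of the
# "equivalent to SPP(k=(5, 9, 13))" note above. Chained 5x5 stride-1 max pools
# cover the same window as a single larger pool (9x9 after two, 13x13 after
# three), assuming padded values never win the max at the borders.
def _sppf_equivalence_sketch():
    import paddle
    import paddle.nn.functional as F
    x = paddle.rand([1, 1, 16, 16])
    y5 = F.max_pool2d(x, 5, stride=1, padding=2)
    chained_9 = F.max_pool2d(y5, 5, stride=1, padding=2)
    direct_9 = F.max_pool2d(x, 9, stride=1, padding=4)
    return bool(paddle.allclose(chained_9, direct_9))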
class CSPLayer(nn.Layer):
"""CSP (Cross Stage Partial) layer with 3 convs, named C3 in YOLOv5"""
def __init__(self,
in_channels,
out_channels,
num_blocks=1,
shortcut=True,
expansion=0.5,
depthwise=False,
bias=False,
act="silu"):
super(CSPLayer, self).__init__()
hidden_channels = int(out_channels * expansion)
self.conv1 = BaseConv(
in_channels, hidden_channels, ksize=1, stride=1, bias=bias, act=act)
self.conv2 = BaseConv(
in_channels, hidden_channels, ksize=1, stride=1, bias=bias, act=act)
self.bottlenecks = nn.Sequential(* [
BottleNeck(
hidden_channels,
hidden_channels,
shortcut=shortcut,
expansion=1.0,
depthwise=depthwise,
bias=bias,
act=act) for _ in range(num_blocks)
])
self.conv3 = BaseConv(
hidden_channels * 2,
out_channels,
ksize=1,
stride=1,
bias=bias,
act=act)
def forward(self, x):
x_1 = self.conv1(x)
x_1 = self.bottlenecks(x_1)
x_2 = self.conv2(x)
x = paddle.concat([x_1, x_2], axis=1)
x = self.conv3(x)
return x
@register
@serializable
class CSPDarkNet(nn.Layer):
"""
CSPDarkNet backbone.
Args:
arch (str): Architecture of CSPDarkNet, from {P5, P6, X}, default as X,
and 'X' means used in YOLOX, 'P5/P6' means used in YOLOv5.
        depth_mult (float): Depth multiplier, multiply number of blocks in
            each CSPLayer, default as 1.0.
        width_mult (float): Width multiplier, multiply number of channels in
            each layer, default as 1.0.
depthwise (bool): Whether to use depth-wise conv layer.
act (str): Activation function type, default as 'silu'.
return_idx (list): Index of stages whose feature maps are returned.
"""
__shared__ = ['depth_mult', 'width_mult', 'act', 'trt']
# in_channels, out_channels, num_blocks, add_shortcut, use_spp(use_sppf)
# 'X' means setting used in YOLOX, 'P5/P6' means setting used in YOLOv5.
arch_settings = {
'X': [[64, 128, 3, True, False], [128, 256, 9, True, False],
[256, 512, 9, True, False], [512, 1024, 3, False, True]],
'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
[256, 512, 9, True, False], [512, 1024, 3, True, True]],
'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False],
[256, 512, 9, True, False], [512, 768, 3, True, False],
[768, 1024, 3, True, True]],
}
def __init__(self,
arch='X',
depth_mult=1.0,
width_mult=1.0,
depthwise=False,
act='silu',
trt=False,
return_idx=[2, 3, 4]):
super(CSPDarkNet, self).__init__()
self.arch = arch
self.return_idx = return_idx
Conv = DWConv if depthwise else BaseConv
arch_setting = self.arch_settings[arch]
base_channels = int(arch_setting[0][0] * width_mult)
        # Note: differences between the latest YOLOv5 and the original YOLOX
        # 1. self.stem, use a Conv stem (in YOLOv5) or a Focus stem (in YOLOX)
        # 2. use SPPF (in YOLOv5) or SPP (in YOLOX)
        # 3. put SPPF before (YOLOv5) or SPP after (YOLOX) the last cspdark block's CSPLayer
        # 4. whether the SPPF(SPP) stage's CSPLayer adds a shortcut: True in YOLOv5, False in YOLOX
if arch in ['P5', 'P6']:
            # in the latest YOLOv5, use Conv stem, and SPPF (fast, only a single spp kernel size)
self.stem = Conv(
3, base_channels, ksize=6, stride=2, bias=False, act=act)
spp_kernal_sizes = 5
elif arch in ['X']:
            # in the original YOLOX, use Focus stem, and SPP (three spp kernel sizes)
self.stem = Focus(
3, base_channels, ksize=3, stride=1, bias=False, act=act)
spp_kernal_sizes = (5, 9, 13)
else:
raise AttributeError("Unsupported arch type: {}".format(arch))
_out_channels = [base_channels]
layers_num = 1
self.csp_dark_blocks = []
for i, (in_channels, out_channels, num_blocks, shortcut,
use_spp) in enumerate(arch_setting):
in_channels = int(in_channels * width_mult)
out_channels = int(out_channels * width_mult)
_out_channels.append(out_channels)
num_blocks = max(round(num_blocks * depth_mult), 1)
stage = []
conv_layer = self.add_sublayer(
'layers{}.stage{}.conv_layer'.format(layers_num, i + 1),
Conv(
in_channels, out_channels, 3, 2, bias=False, act=act))
stage.append(conv_layer)
layers_num += 1
if use_spp and arch in ['X']:
# in YOLOX use SPPLayer
spp_layer = self.add_sublayer(
'layers{}.stage{}.spp_layer'.format(layers_num, i + 1),
SPPLayer(
out_channels,
out_channels,
kernel_sizes=spp_kernal_sizes,
bias=False,
act=act))
stage.append(spp_layer)
layers_num += 1
csp_layer = self.add_sublayer(
'layers{}.stage{}.csp_layer'.format(layers_num, i + 1),
CSPLayer(
out_channels,
out_channels,
num_blocks=num_blocks,
shortcut=shortcut,
depthwise=depthwise,
bias=False,
act=act))
stage.append(csp_layer)
layers_num += 1
if use_spp and arch in ['P5', 'P6']:
# in latest YOLOv5 use SPPFLayer instead of SPPLayer
sppf_layer = self.add_sublayer(
'layers{}.stage{}.sppf_layer'.format(layers_num, i + 1),
SPPFLayer(
out_channels,
out_channels,
ksize=5,
bias=False,
act=act))
stage.append(sppf_layer)
layers_num += 1
self.csp_dark_blocks.append(nn.Sequential(*stage))
self._out_channels = [_out_channels[i] for i in self.return_idx]
self.strides = [[2, 4, 8, 16, 32, 64][i] for i in self.return_idx]
def forward(self, inputs):
x = inputs['image']
outputs = []
x = self.stem(x)
for i, layer in enumerate(self.csp_dark_blocks):
x = layer(x)
if i + 1 in self.return_idx:
outputs.append(x)
return outputs
@property
def out_shape(self):
return [
ShapeSpec(
channels=c, stride=s)
for c, s in zip(self._out_channels, self.strides)
]
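# Hedged usage sketch (not part of the original file): builds the backbone
# directly and runs a dummy batch through it. The input size and multipliers
# are illustrative assumptions; requires a working paddle installation.
if __name__ == '__main__':
    x = paddle.randn([1, 3, 640, 640])
    backbone = CSPDarkNet(arch='X', depth_mult=0.33, width_mult=0.50)
    feats = backbone({'image': x})
    for feat, spec in zip(feats, backbone.out_shape):
        print(feat.shape, 'stride:', spec.stride)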
| PaddleDetection/ppdet/modeling/backbones/csp_darknet.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/backbones/csp_darknet.py",
"repo_id": "PaddleDetection",
"token_count": 7489
} | 71 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numbers import Integral
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register, serializable
from ..shape_spec import ShapeSpec
from .resnet import ConvNormLayer
__all__ = ['Res2Net', 'Res2NetC5']
Res2Net_cfg = {
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 12, 48, 3]
}
class BottleNeck(nn.Layer):
def __init__(self,
ch_in,
ch_out,
stride,
shortcut,
width,
scales=4,
variant='b',
groups=1,
lr=1.0,
norm_type='bn',
norm_decay=0.,
freeze_norm=True,
dcn_v2=False):
super(BottleNeck, self).__init__()
self.shortcut = shortcut
self.scales = scales
self.stride = stride
if not shortcut:
if variant == 'd' and stride == 2:
self.branch1 = nn.Sequential()
self.branch1.add_sublayer(
'pool',
nn.AvgPool2D(
kernel_size=2, stride=2, padding=0, ceil_mode=True))
self.branch1.add_sublayer(
'conv',
ConvNormLayer(
ch_in=ch_in,
ch_out=ch_out,
filter_size=1,
stride=1,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr))
else:
self.branch1 = ConvNormLayer(
ch_in=ch_in,
ch_out=ch_out,
filter_size=1,
stride=stride,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr)
self.branch2a = ConvNormLayer(
ch_in=ch_in,
ch_out=width * scales,
filter_size=1,
stride=stride if variant == 'a' else 1,
groups=1,
act='relu',
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr)
self.branch2b = nn.LayerList([
ConvNormLayer(
ch_in=width,
ch_out=width,
filter_size=3,
stride=1 if variant == 'a' else stride,
groups=groups,
act='relu',
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr,
dcn_v2=dcn_v2) for _ in range(self.scales - 1)
])
self.branch2c = ConvNormLayer(
ch_in=width * scales,
ch_out=ch_out,
filter_size=1,
stride=1,
groups=1,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=lr)
def forward(self, inputs):
out = self.branch2a(inputs)
feature_split = paddle.split(out, self.scales, 1)
out_split = []
for i in range(self.scales - 1):
if i == 0 or self.stride == 2:
out_split.append(self.branch2b[i](feature_split[i]))
else:
out_split.append(self.branch2b[i](paddle.add(feature_split[i],
out_split[-1])))
if self.stride == 1:
out_split.append(feature_split[-1])
else:
out_split.append(F.avg_pool2d(feature_split[-1], 3, self.stride, 1))
out = self.branch2c(paddle.concat(out_split, 1))
if self.shortcut:
short = inputs
else:
short = self.branch1(inputs)
out = paddle.add(out, short)
out = F.relu(out)
return out
class Blocks(nn.Layer):
def __init__(self,
ch_in,
ch_out,
count,
stage_num,
width,
scales=4,
variant='b',
groups=1,
lr=1.0,
norm_type='bn',
norm_decay=0.,
freeze_norm=True,
dcn_v2=False):
super(Blocks, self).__init__()
self.blocks = nn.Sequential()
for i in range(count):
self.blocks.add_sublayer(
str(i),
BottleNeck(
ch_in=ch_in if i == 0 else ch_out,
ch_out=ch_out,
stride=2 if i == 0 and stage_num != 2 else 1,
shortcut=False if i == 0 else True,
width=width * (2**(stage_num - 2)),
scales=scales,
variant=variant,
groups=groups,
lr=lr,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
dcn_v2=dcn_v2))
def forward(self, inputs):
return self.blocks(inputs)
@register
@serializable
class Res2Net(nn.Layer):
"""
Res2Net, see https://arxiv.org/abs/1904.01169
Args:
depth (int): Res2Net depth, should be 50, 101, 152, 200.
width (int): Res2Net width
scales (int): Res2Net scale
variant (str): Res2Net variant, supports 'a', 'b', 'c', 'd' currently
        lr_mult_list (list): learning rate ratio of different Res2Net stages (2, 3, 4, 5);
            a lower learning rate ratio is needed for pretrained models
            obtained by distillation (default as [1.0, 1.0, 1.0, 1.0]).
groups (int): The groups number of the Conv Layer.
norm_type (str): normalization type, 'bn' or 'sync_bn'
norm_decay (float): weight decay for normalization layer weights
freeze_norm (bool): freeze normalization layers
freeze_at (int): freeze the backbone at which stage
return_idx (list): index of stages whose feature maps are returned,
index 0 stands for res2
dcn_v2_stages (list): index of stages who select deformable conv v2
num_stages (int): number of stages created
"""
__shared__ = ['norm_type']
def __init__(self,
depth=50,
width=26,
scales=4,
variant='b',
lr_mult_list=[1.0, 1.0, 1.0, 1.0],
groups=1,
norm_type='bn',
norm_decay=0.,
freeze_norm=True,
freeze_at=0,
return_idx=[0, 1, 2, 3],
dcn_v2_stages=[-1],
num_stages=4):
super(Res2Net, self).__init__()
self._model_type = 'Res2Net' if groups == 1 else 'Res2NeXt'
        assert depth in [50, 101, 152, 200], \
            "depth {} not in [50, 101, 152, 200]".format(depth)
assert variant in ['a', 'b', 'c', 'd'], "invalid Res2Net variant"
assert num_stages >= 1 and num_stages <= 4
self.depth = depth
self.variant = variant
self.norm_type = norm_type
self.norm_decay = norm_decay
self.freeze_norm = freeze_norm
self.freeze_at = freeze_at
if isinstance(return_idx, Integral):
return_idx = [return_idx]
assert max(return_idx) < num_stages, \
'the maximum return index must smaller than num_stages, ' \
'but received maximum return index is {} and num_stages ' \
'is {}'.format(max(return_idx), num_stages)
self.return_idx = return_idx
self.num_stages = num_stages
assert len(lr_mult_list) == 4, \
"lr_mult_list length must be 4 but got {}".format(len(lr_mult_list))
if isinstance(dcn_v2_stages, Integral):
dcn_v2_stages = [dcn_v2_stages]
assert max(dcn_v2_stages) < num_stages
self.dcn_v2_stages = dcn_v2_stages
block_nums = Res2Net_cfg[depth]
# C1 stage
if self.variant in ['c', 'd']:
conv_def = [
[3, 32, 3, 2, "conv1_1"],
[32, 32, 3, 1, "conv1_2"],
[32, 64, 3, 1, "conv1_3"],
]
else:
conv_def = [[3, 64, 7, 2, "conv1"]]
self.res1 = nn.Sequential()
for (c_in, c_out, k, s, _name) in conv_def:
self.res1.add_sublayer(
_name,
ConvNormLayer(
ch_in=c_in,
ch_out=c_out,
filter_size=k,
stride=s,
groups=1,
act='relu',
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
lr=1.0))
self._in_channels = [64, 256, 512, 1024]
self._out_channels = [256, 512, 1024, 2048]
self._out_strides = [4, 8, 16, 32]
# C2-C5 stages
self.res_layers = []
for i in range(num_stages):
lr_mult = lr_mult_list[i]
stage_num = i + 2
self.res_layers.append(
self.add_sublayer(
"res{}".format(stage_num),
Blocks(
self._in_channels[i],
self._out_channels[i],
count=block_nums[i],
stage_num=stage_num,
width=width,
scales=scales,
groups=groups,
lr=lr_mult,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
dcn_v2=(i in self.dcn_v2_stages))))
@property
def out_shape(self):
return [
ShapeSpec(
channels=self._out_channels[i], stride=self._out_strides[i])
for i in self.return_idx
]
def forward(self, inputs):
x = inputs['image']
res1 = self.res1(x)
x = F.max_pool2d(res1, kernel_size=3, stride=2, padding=1)
outs = []
for idx, stage in enumerate(self.res_layers):
x = stage(x)
if idx == self.freeze_at:
x.stop_gradient = True
if idx in self.return_idx:
outs.append(x)
return outs
@register
class Res2NetC5(nn.Layer):
def __init__(self, depth=50, width=26, scales=4, variant='b'):
super(Res2NetC5, self).__init__()
feat_in, feat_out = [1024, 2048]
self.res5 = Blocks(
feat_in,
feat_out,
count=3,
stage_num=5,
width=width,
scales=scales,
variant=variant)
self.feat_out = feat_out
@property
def out_shape(self):
return [ShapeSpec(
channels=self.feat_out,
stride=32, )]
def forward(self, roi_feat, stage=0):
y = self.res5(roi_feat)
return y
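# Hedged usage sketch (not part of the original file): instantiates the
# backbone directly with default settings and feeds a dummy image batch.
# The input resolution is an illustrative assumption.
if __name__ == '__main__':
    net = Res2Net(depth=50, return_idx=[0, 1, 2, 3])
    feats = net({'image': paddle.randn([1, 3, 224, 224])})
    for feat, spec in zip(feats, net.out_shape):
        print(feat.shape, 'stride:', spec.stride)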
| PaddleDetection/ppdet/modeling/backbones/res2net.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/backbones/res2net.py",
"repo_id": "PaddleDetection",
"token_count": 6869
} | 72 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import Normal
from ppdet.core.workspace import register
from .bbox_head import BBoxHead, TwoFCHead, XConvNormHead
from .roi_extractor import RoIAlign
from ..shape_spec import ShapeSpec
from ..bbox_utils import delta2bbox, clip_bbox, nonempty_bbox
from ..cls_utils import _get_class_default_kwargs
__all__ = ['CascadeTwoFCHead', 'CascadeXConvNormHead', 'CascadeHead']
@register
class CascadeTwoFCHead(nn.Layer):
__shared__ = ['num_cascade_stage']
"""
    Cascade RCNN bbox head with two FC layers to extract features
Args:
in_channel (int): Input channel which can be derived by from_config
out_channel (int): Output channel
resolution (int): Resolution of input feature map, default 7
num_cascade_stage (int): The number of cascade stage, default 3
"""
def __init__(self,
in_channel=256,
out_channel=1024,
resolution=7,
num_cascade_stage=3):
super(CascadeTwoFCHead, self).__init__()
self.in_channel = in_channel
self.out_channel = out_channel
self.head_list = []
for stage in range(num_cascade_stage):
head_per_stage = self.add_sublayer(
str(stage), TwoFCHead(in_channel, out_channel, resolution))
self.head_list.append(head_per_stage)
@classmethod
def from_config(cls, cfg, input_shape):
s = input_shape
s = s[0] if isinstance(s, (list, tuple)) else s
return {'in_channel': s.channels}
@property
def out_shape(self):
return [ShapeSpec(channels=self.out_channel, )]
def forward(self, rois_feat, stage=0):
out = self.head_list[stage](rois_feat)
return out
@register
class CascadeXConvNormHead(nn.Layer):
__shared__ = ['norm_type', 'freeze_norm', 'num_cascade_stage']
"""
    Cascade RCNN bbox head with several convolution layers
Args:
in_channel (int): Input channels which can be derived by from_config
num_convs (int): The number of conv layers
conv_dim (int): The number of channels for the conv layers
out_channel (int): Output channels
resolution (int): Resolution of input feature map
norm_type (string): Norm type, bn, gn, sync_bn are available,
default `gn`
freeze_norm (bool): Whether to freeze the norm
num_cascade_stage (int): The number of cascade stage, default 3
"""
def __init__(self,
in_channel=256,
num_convs=4,
conv_dim=256,
out_channel=1024,
resolution=7,
norm_type='gn',
freeze_norm=False,
num_cascade_stage=3):
super(CascadeXConvNormHead, self).__init__()
self.in_channel = in_channel
self.out_channel = out_channel
self.head_list = []
for stage in range(num_cascade_stage):
head_per_stage = self.add_sublayer(
str(stage),
XConvNormHead(
in_channel,
num_convs,
conv_dim,
out_channel,
resolution,
norm_type,
freeze_norm,
stage_name='stage{}_'.format(stage)))
self.head_list.append(head_per_stage)
@classmethod
def from_config(cls, cfg, input_shape):
s = input_shape
s = s[0] if isinstance(s, (list, tuple)) else s
return {'in_channel': s.channels}
@property
def out_shape(self):
return [ShapeSpec(channels=self.out_channel, )]
def forward(self, rois_feat, stage=0):
out = self.head_list[stage](rois_feat)
return out
@register
class CascadeHead(BBoxHead):
__shared__ = ['num_classes', 'num_cascade_stages']
__inject__ = ['bbox_assigner', 'bbox_loss']
"""
Cascade RCNN bbox head
Args:
head (nn.Layer): Extract feature in bbox head
in_channel (int): Input channel after RoI extractor
roi_extractor (object): The module of RoI Extractor
bbox_assigner (object): The module of Box Assigner, label and sample the
box.
num_classes (int): The number of classes
        bbox_weight (List[List[float]]): The weights used to decode the predicted boxes;
            the list length equals the number of cascade stages
        num_cascade_stages (int): The number of stages used to refine the box
"""
def __init__(self,
head,
in_channel,
roi_extractor=_get_class_default_kwargs(RoIAlign),
bbox_assigner='BboxAssigner',
num_classes=80,
bbox_weight=[[10., 10., 5., 5.], [20.0, 20.0, 10.0, 10.0],
[30.0, 30.0, 15.0, 15.0]],
num_cascade_stages=3,
bbox_loss=None,
reg_class_agnostic=True,
stage_loss_weights=None,
loss_normalize_pos=False,
add_gt_as_proposals=[True, False, False]):
nn.Layer.__init__(self, )
self.head = head
self.roi_extractor = roi_extractor
if isinstance(roi_extractor, dict):
self.roi_extractor = RoIAlign(**roi_extractor)
self.bbox_assigner = bbox_assigner
self.num_classes = num_classes
self.bbox_weight = bbox_weight
self.num_cascade_stages = num_cascade_stages
self.bbox_loss = bbox_loss
self.stage_loss_weights = [
1. / num_cascade_stages for _ in range(num_cascade_stages)
] if stage_loss_weights is None else stage_loss_weights
self.add_gt_as_proposals = add_gt_as_proposals
assert len(
self.stage_loss_weights
) == num_cascade_stages, f'stage_loss_weights({len(self.stage_loss_weights)}) do not equal to num_cascade_stages({num_cascade_stages})'
self.reg_class_agnostic = reg_class_agnostic
num_bbox_delta = 4 if reg_class_agnostic else 4 * num_classes
self.loss_normalize_pos = loss_normalize_pos
self.bbox_score_list = []
self.bbox_delta_list = []
for i in range(num_cascade_stages):
score_name = 'bbox_score_stage{}'.format(i)
delta_name = 'bbox_delta_stage{}'.format(i)
bbox_score = self.add_sublayer(
score_name,
nn.Linear(
in_channel,
self.num_classes + 1,
weight_attr=paddle.ParamAttr(initializer=Normal(
mean=0.0, std=0.01))))
bbox_delta = self.add_sublayer(
delta_name,
nn.Linear(
in_channel,
num_bbox_delta,
weight_attr=paddle.ParamAttr(initializer=Normal(
mean=0.0, std=0.001))))
self.bbox_score_list.append(bbox_score)
self.bbox_delta_list.append(bbox_delta)
self.assigned_label = None
self.assigned_rois = None
def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):
"""
body_feats (list[Tensor]): Feature maps from backbone
rois (Tensor): RoIs generated from RPN module
rois_num (Tensor): The number of RoIs in each image
inputs (dict{Tensor}): The ground-truth of image
"""
targets = []
if self.training:
rois, rois_num, targets = self.bbox_assigner(
rois,
rois_num,
inputs,
add_gt_as_proposals=self.add_gt_as_proposals[0])
targets_list = [targets]
self.assigned_rois = (rois, rois_num)
self.assigned_targets = targets
pred_bbox = None
head_out_list = []
for i in range(self.num_cascade_stages):
if i > 0:
rois, rois_num = self._get_rois_from_boxes(pred_bbox,
inputs['im_shape'])
if self.training:
rois, rois_num, targets = self.bbox_assigner(
rois,
rois_num,
inputs,
i,
is_cascade=True,
add_gt_as_proposals=self.add_gt_as_proposals[i])
targets_list.append(targets)
rois_feat = self.roi_extractor(body_feats, rois, rois_num)
bbox_feat = self.head(rois_feat, i)
scores = self.bbox_score_list[i](bbox_feat)
deltas = self.bbox_delta_list[i](bbox_feat)
# TODO (lyuwenyu) Is it correct for only one class ?
if not self.reg_class_agnostic and i < self.num_cascade_stages - 1:
deltas = deltas.reshape([deltas.shape[0], self.num_classes, 4])
labels = scores[:, :-1].argmax(axis=-1)
if self.training:
deltas = deltas[paddle.arange(deltas.shape[0]), labels]
else:
deltas = deltas[((deltas + 10000) * F.one_hot(
labels, num_classes=self.num_classes).unsqueeze(-1) != 0
).nonzero(as_tuple=True)].reshape(
[deltas.shape[0], 4])
head_out_list.append([scores, deltas, rois])
pred_bbox = self._get_pred_bbox(deltas, rois, self.bbox_weight[i])
if self.training:
loss = {}
for stage, value in enumerate(zip(head_out_list, targets_list)):
(scores, deltas, rois), targets = value
loss_stage = self.get_loss(
scores,
deltas,
targets,
rois,
self.bbox_weight[stage],
loss_normalize_pos=self.loss_normalize_pos)
for k, v in loss_stage.items():
loss[k + "_stage{}".format(
stage)] = v * self.stage_loss_weights[stage]
return loss, bbox_feat
else:
scores, deltas, self.refined_rois = self.get_prediction(
head_out_list)
return (deltas, scores), self.head
def _get_rois_from_boxes(self, boxes, im_shape):
rois = []
for i, boxes_per_image in enumerate(boxes):
clip_box = clip_bbox(boxes_per_image, im_shape[i])
if self.training:
keep = nonempty_bbox(clip_box)
if keep.shape[0] == 0:
keep = paddle.zeros([1], dtype='int32')
clip_box = paddle.gather(clip_box, keep)
rois.append(clip_box)
rois_num = paddle.concat([paddle.shape(r)[0:1] for r in rois])
return rois, rois_num
def _get_pred_bbox(self, deltas, proposals, weights):
pred_proposals = paddle.concat(proposals) if len(
proposals) > 1 else proposals[0]
pred_bbox = delta2bbox(deltas, pred_proposals, weights)
pred_bbox = paddle.reshape(pred_bbox, [-1, deltas.shape[-1]])
num_prop = []
for p in proposals:
num_prop.append(p.shape[0])
        # NOTE(dev): num_prop will be tagged as LoDTensorArray because it
        # depends on batch_size under @to_static. However the argument
        # num_or_sections in paddle.split does not support LoDTensorArray,
        # so we use [-1] to replace it if num_prop is not a list. This
        # modification ensures the correctness of both dynamic and static graphs.
if not isinstance(num_prop, list):
num_prop = [-1]
return pred_bbox.split(num_prop)
def get_prediction(self, head_out_list):
"""
head_out_list(List[Tensor]): scores, deltas, rois
"""
pred_list = []
scores_list = [F.softmax(head[0]) for head in head_out_list]
scores = paddle.add_n(scores_list) / self.num_cascade_stages
# Get deltas and rois from the last stage
_, deltas, rois = head_out_list[-1]
return scores, deltas, rois
def get_refined_rois(self, ):
return self.refined_rois
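# Hedged sketch (not part of the original file): at inference time the scores
# returned by get_prediction above are the per-stage softmax outputs averaged
# over all cascade stages. The shapes below are illustrative assumptions.
def _cascade_score_average_sketch(num_stages=3, num_rois=4, num_classes=80):
    stage_logits = [paddle.rand([num_rois, num_classes + 1]) for _ in range(num_stages)]
    scores = paddle.add_n([F.softmax(logit) for logit in stage_logits]) / num_stages
    return scores  # [num_rois, num_classes + 1], each row sums to 1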
| PaddleDetection/ppdet/modeling/heads/cascade_head.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/heads/cascade_head.py",
"repo_id": "PaddleDetection",
"token_count": 6551
} | 73 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Normal, Constant
from ppdet.modeling.bbox_utils import bbox2delta, delta2bbox
from ppdet.modeling.heads.fcos_head import FCOSFeat
from ppdet.core.workspace import register
__all__ = ['RetinaHead']
@register
class RetinaFeat(FCOSFeat):
"""We use FCOSFeat to construct conv layers in RetinaNet.
We rename FCOSFeat to RetinaFeat to avoid confusion.
"""
pass
@register
class RetinaHead(nn.Layer):
"""Used in RetinaNet proposed in paper https://arxiv.org/pdf/1708.02002.pdf
"""
__shared__ = ['num_classes']
__inject__ = [
'conv_feat', 'anchor_generator', 'bbox_assigner', 'loss_class',
'loss_bbox', 'nms'
]
def __init__(self,
num_classes=80,
conv_feat='RetinaFeat',
anchor_generator='RetinaAnchorGenerator',
bbox_assigner='MaxIoUAssigner',
loss_class='FocalLoss',
loss_bbox='SmoothL1Loss',
nms='MultiClassNMS',
prior_prob=0.01,
nms_pre=1000,
weights=[1., 1., 1., 1.]):
super(RetinaHead, self).__init__()
self.num_classes = num_classes
self.conv_feat = conv_feat
self.anchor_generator = anchor_generator
self.bbox_assigner = bbox_assigner
self.loss_class = loss_class
self.loss_bbox = loss_bbox
self.nms = nms
self.nms_pre = nms_pre
self.weights = weights
bias_init_value = -math.log((1 - prior_prob) / prior_prob)
num_anchors = self.anchor_generator.num_anchors
self.retina_cls = nn.Conv2D(
in_channels=self.conv_feat.feat_out,
out_channels=self.num_classes * num_anchors,
kernel_size=3,
stride=1,
padding=1,
weight_attr=ParamAttr(initializer=Normal(
mean=0.0, std=0.01)),
bias_attr=ParamAttr(initializer=Constant(value=bias_init_value)))
self.retina_reg = nn.Conv2D(
in_channels=self.conv_feat.feat_out,
out_channels=4 * num_anchors,
kernel_size=3,
stride=1,
padding=1,
weight_attr=ParamAttr(initializer=Normal(
mean=0.0, std=0.01)),
bias_attr=ParamAttr(initializer=Constant(value=0)))
def forward(self, neck_feats, targets=None):
cls_logits_list = []
bboxes_reg_list = []
for neck_feat in neck_feats:
conv_cls_feat, conv_reg_feat = self.conv_feat(neck_feat)
cls_logits = self.retina_cls(conv_cls_feat)
bbox_reg = self.retina_reg(conv_reg_feat)
cls_logits_list.append(cls_logits)
bboxes_reg_list.append(bbox_reg)
if self.training:
return self.get_loss([cls_logits_list, bboxes_reg_list], targets)
else:
return [cls_logits_list, bboxes_reg_list]
def get_loss(self, head_outputs, targets):
"""Here we calculate loss for a batch of images.
We assign anchors to gts in each image and gather all the assigned
        positive and negative samples. Then the loss is calculated on the gathered
samples.
"""
cls_logits_list, bboxes_reg_list = head_outputs
anchors = self.anchor_generator(cls_logits_list)
anchors = paddle.concat(anchors)
# matches: contain gt_inds
# match_labels: -1(ignore), 0(neg) or 1(pos)
matches_list, match_labels_list = [], []
# assign anchors to gts, no sampling is involved
for gt_bbox in targets['gt_bbox']:
matches, match_labels = self.bbox_assigner(anchors, gt_bbox)
matches_list.append(matches)
match_labels_list.append(match_labels)
# reshape network outputs
cls_logits = [
_.transpose([0, 2, 3, 1]).reshape([0, -1, self.num_classes])
for _ in cls_logits_list
]
bboxes_reg = [
_.transpose([0, 2, 3, 1]).reshape([0, -1, 4])
for _ in bboxes_reg_list
]
cls_logits = paddle.concat(cls_logits, axis=1)
bboxes_reg = paddle.concat(bboxes_reg, axis=1)
cls_pred_list, cls_tar_list = [], []
reg_pred_list, reg_tar_list = [], []
# find and gather preds and targets in each image
for matches, match_labels, cls_logit, bbox_reg, gt_bbox, gt_class in \
zip(matches_list, match_labels_list, cls_logits, bboxes_reg,
targets['gt_bbox'], targets['gt_class']):
pos_mask = (match_labels == 1)
neg_mask = (match_labels == 0)
chosen_mask = paddle.logical_or(pos_mask, neg_mask)
gt_class = gt_class.reshape([-1])
bg_class = paddle.to_tensor(
[self.num_classes], dtype=gt_class.dtype)
# a trick to assign num_classes to negative targets
gt_class = paddle.concat([gt_class, bg_class], axis=-1)
matches = paddle.where(neg_mask,
paddle.full_like(matches, gt_class.size - 1),
matches)
cls_pred = cls_logit[chosen_mask]
cls_tar = gt_class[matches[chosen_mask]]
reg_pred = bbox_reg[pos_mask].reshape([-1, 4])
reg_tar = gt_bbox[matches[pos_mask]].reshape([-1, 4])
reg_tar = bbox2delta(anchors[pos_mask], reg_tar, self.weights)
cls_pred_list.append(cls_pred)
cls_tar_list.append(cls_tar)
reg_pred_list.append(reg_pred)
reg_tar_list.append(reg_tar)
cls_pred = paddle.concat(cls_pred_list)
cls_tar = paddle.concat(cls_tar_list)
reg_pred = paddle.concat(reg_pred_list)
reg_tar = paddle.concat(reg_tar_list)
avg_factor = max(1.0, reg_pred.shape[0])
cls_loss = self.loss_class(
cls_pred, cls_tar, reduction='sum') / avg_factor
if reg_pred.shape[0] == 0:
reg_loss = paddle.zeros([1])
reg_loss.stop_gradient = False
else:
reg_loss = self.loss_bbox(
reg_pred, reg_tar, reduction='sum') / avg_factor
loss = cls_loss + reg_loss
out_dict = {
'loss_cls': cls_loss,
'loss_reg': reg_loss,
'loss': loss,
}
return out_dict
def get_bboxes_single(self,
anchors,
cls_scores_list,
bbox_preds_list,
im_shape,
scale_factor,
rescale=True):
assert len(cls_scores_list) == len(bbox_preds_list)
mlvl_bboxes = []
mlvl_scores = []
for anchor, cls_score, bbox_pred in zip(anchors, cls_scores_list,
bbox_preds_list):
cls_score = cls_score.reshape([-1, self.num_classes])
bbox_pred = bbox_pred.reshape([-1, 4])
if self.nms_pre is not None and cls_score.shape[0] > self.nms_pre:
max_score = cls_score.max(axis=1)
_, topk_inds = max_score.topk(self.nms_pre)
bbox_pred = bbox_pred.gather(topk_inds)
anchor = anchor.gather(topk_inds)
cls_score = cls_score.gather(topk_inds)
bbox_pred = delta2bbox(bbox_pred, anchor, self.weights).squeeze()
mlvl_bboxes.append(bbox_pred)
mlvl_scores.append(F.sigmoid(cls_score))
mlvl_bboxes = paddle.concat(mlvl_bboxes)
mlvl_bboxes = paddle.squeeze(mlvl_bboxes)
if rescale:
mlvl_bboxes = mlvl_bboxes / paddle.concat(
[scale_factor[::-1], scale_factor[::-1]])
mlvl_scores = paddle.concat(mlvl_scores)
mlvl_scores = mlvl_scores.transpose([1, 0])
return mlvl_bboxes, mlvl_scores
def decode(self, anchors, cls_logits, bboxes_reg, im_shape, scale_factor):
batch_bboxes = []
batch_scores = []
for img_id in range(cls_logits[0].shape[0]):
num_lvls = len(cls_logits)
cls_scores_list = [cls_logits[i][img_id] for i in range(num_lvls)]
bbox_preds_list = [bboxes_reg[i][img_id] for i in range(num_lvls)]
bboxes, scores = self.get_bboxes_single(
anchors, cls_scores_list, bbox_preds_list, im_shape[img_id],
scale_factor[img_id])
batch_bboxes.append(bboxes)
batch_scores.append(scores)
batch_bboxes = paddle.stack(batch_bboxes, axis=0)
batch_scores = paddle.stack(batch_scores, axis=0)
return batch_bboxes, batch_scores
def post_process(self, head_outputs, im_shape, scale_factor):
cls_logits_list, bboxes_reg_list = head_outputs
anchors = self.anchor_generator(cls_logits_list)
cls_logits = [_.transpose([0, 2, 3, 1]) for _ in cls_logits_list]
bboxes_reg = [_.transpose([0, 2, 3, 1]) for _ in bboxes_reg_list]
bboxes, scores = self.decode(anchors, cls_logits, bboxes_reg, im_shape,
scale_factor)
bbox_pred, bbox_num, nms_keep_idx = self.nms(bboxes, scores)
return bbox_pred, bbox_num, nms_keep_idx
def get_scores_single(self, cls_scores_list):
mlvl_logits = []
for cls_score in cls_scores_list:
cls_score = cls_score.reshape([-1, self.num_classes])
if self.nms_pre is not None and cls_score.shape[0] > self.nms_pre:
max_score = cls_score.max(axis=1)
_, topk_inds = max_score.topk(self.nms_pre)
cls_score = cls_score.gather(topk_inds)
mlvl_logits.append(cls_score)
mlvl_logits = paddle.concat(mlvl_logits)
mlvl_logits = mlvl_logits.transpose([1, 0])
return mlvl_logits
def decode_cls_logits(self, cls_logits_list):
cls_logits = [_.transpose([0, 2, 3, 1]) for _ in cls_logits_list]
batch_logits = []
for img_id in range(cls_logits[0].shape[0]):
num_lvls = len(cls_logits)
cls_scores_list = [cls_logits[i][img_id] for i in range(num_lvls)]
logits = self.get_scores_single(cls_scores_list)
batch_logits.append(logits)
batch_logits = paddle.stack(batch_logits, axis=0)
return batch_logits
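# Hedged sketch (not part of the original file): the regression targets built
# with bbox2delta in get_loss are recovered by delta2bbox in get_bboxes_single
# when the same `weights` are used. The boxes below are illustrative values.
def _delta_roundtrip_sketch():
    anchors = paddle.to_tensor([[10., 10., 50., 60.]])
    gt_boxes = paddle.to_tensor([[12., 8., 48., 66.]])
    weights = [1., 1., 1., 1.]
    deltas = bbox2delta(anchors, gt_boxes, weights)
    decoded = delta2bbox(deltas, anchors, weights).squeeze()
    return decoded  # approximately gt_boxes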
| PaddleDetection/ppdet/modeling/heads/retina_head.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/heads/retina_head.py",
"repo_id": "PaddleDetection",
"token_count": 5719
} | 74 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import six
import numpy as np
from numbers import Integral
import paddle
import paddle.nn as nn
from paddle import ParamAttr
from paddle import to_tensor
import paddle.nn.functional as F
from paddle.nn.initializer import Normal, Constant, XavierUniform
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register, serializable
from ppdet.modeling.bbox_utils import delta2bbox
from . import ops
from .initializer import xavier_uniform_, constant_
from paddle.vision.ops import DeformConv2D
def _to_list(l):
if isinstance(l, (list, tuple)):
return list(l)
return [l]
class AlignConv(nn.Layer):
def __init__(self, in_channels, out_channels, kernel_size=3, groups=1):
super(AlignConv, self).__init__()
self.kernel_size = kernel_size
self.align_conv = paddle.vision.ops.DeformConv2D(
in_channels,
out_channels,
kernel_size=self.kernel_size,
padding=(self.kernel_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
bias_attr=None)
@paddle.no_grad()
def get_offset(self, anchors, featmap_size, stride):
"""
Args:
anchors: [B, L, 5] xc,yc,w,h,angle
featmap_size: (feat_h, feat_w)
            stride: down-sampling stride of the feature map, e.g. 8
        Returns:
            offset (Tensor): [B, kernel_size * kernel_size * 2, feat_h, feat_w],
                sampling offsets fed to the deformable convolution
        """
batch = anchors.shape[0]
dtype = anchors.dtype
feat_h, feat_w = featmap_size
pad = (self.kernel_size - 1) // 2
idx = paddle.arange(-pad, pad + 1, dtype=dtype)
yy, xx = paddle.meshgrid(idx, idx)
xx = paddle.reshape(xx, [-1])
yy = paddle.reshape(yy, [-1])
# get sampling locations of default conv
xc = paddle.arange(0, feat_w, dtype=dtype)
yc = paddle.arange(0, feat_h, dtype=dtype)
yc, xc = paddle.meshgrid(yc, xc)
xc = paddle.reshape(xc, [-1, 1])
yc = paddle.reshape(yc, [-1, 1])
x_conv = xc + xx
y_conv = yc + yy
# get sampling locations of anchors
x_ctr, y_ctr, w, h, a = paddle.split(anchors, 5, axis=-1)
x_ctr = x_ctr / stride
y_ctr = y_ctr / stride
w_s = w / stride
h_s = h / stride
cos, sin = paddle.cos(a), paddle.sin(a)
dw, dh = w_s / self.kernel_size, h_s / self.kernel_size
x, y = dw * xx, dh * yy
xr = cos * x - sin * y
yr = sin * x + cos * y
x_anchor, y_anchor = xr + x_ctr, yr + y_ctr
        # get offset field
offset_x = x_anchor - x_conv
offset_y = y_anchor - y_conv
offset = paddle.stack([offset_y, offset_x], axis=-1)
offset = offset.reshape(
[batch, feat_h, feat_w, self.kernel_size * self.kernel_size * 2])
offset = offset.transpose([0, 3, 1, 2])
return offset
def forward(self, x, refine_anchors, featmap_size, stride):
batch = paddle.shape(x)[0].numpy()
offset = self.get_offset(refine_anchors, featmap_size, stride)
if self.training:
x = F.relu(self.align_conv(x, offset.detach()))
else:
x = F.relu(self.align_conv(x, offset))
return x
class DeformableConvV2(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
weight_attr=None,
bias_attr=None,
lr_scale=1,
regularizer=None,
skip_quant=False,
dcn_bias_regularizer=L2Decay(0.),
dcn_bias_lr_scale=2.):
super(DeformableConvV2, self).__init__()
self.offset_channel = 2 * kernel_size**2
self.mask_channel = kernel_size**2
if lr_scale == 1 and regularizer is None:
offset_bias_attr = ParamAttr(initializer=Constant(0.))
else:
offset_bias_attr = ParamAttr(
initializer=Constant(0.),
learning_rate=lr_scale,
regularizer=regularizer)
self.conv_offset = nn.Conv2D(
in_channels,
3 * kernel_size**2,
kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2,
weight_attr=ParamAttr(initializer=Constant(0.0)),
bias_attr=offset_bias_attr)
if skip_quant:
self.conv_offset.skip_quant = True
if bias_attr:
# in FCOS-DCN head, specifically need learning_rate and regularizer
dcn_bias_attr = ParamAttr(
initializer=Constant(value=0),
regularizer=dcn_bias_regularizer,
learning_rate=dcn_bias_lr_scale)
else:
# in ResNet backbone, do not need bias
dcn_bias_attr = False
self.conv_dcn = DeformConv2D(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2 * dilation,
dilation=dilation,
groups=groups,
weight_attr=weight_attr,
bias_attr=dcn_bias_attr)
def forward(self, x):
offset_mask = self.conv_offset(x)
offset, mask = paddle.split(
offset_mask,
num_or_sections=[self.offset_channel, self.mask_channel],
axis=1)
mask = F.sigmoid(mask)
y = self.conv_dcn(x, offset, mask=mask)
return y
class ConvNormLayer(nn.Layer):
def __init__(self,
ch_in,
ch_out,
filter_size,
stride,
groups=1,
norm_type='bn',
norm_decay=0.,
norm_groups=32,
use_dcn=False,
bias_on=False,
lr_scale=1.,
freeze_norm=False,
initializer=Normal(
mean=0., std=0.01),
skip_quant=False,
dcn_lr_scale=2.,
dcn_regularizer=L2Decay(0.)):
super(ConvNormLayer, self).__init__()
assert norm_type in ['bn', 'sync_bn', 'gn', None]
if bias_on:
bias_attr = ParamAttr(
initializer=Constant(value=0.), learning_rate=lr_scale)
else:
bias_attr = False
if not use_dcn:
self.conv = nn.Conv2D(
in_channels=ch_in,
out_channels=ch_out,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(
initializer=initializer, learning_rate=1.),
bias_attr=bias_attr)
if skip_quant:
self.conv.skip_quant = True
else:
# in FCOS-DCN head, specifically need learning_rate and regularizer
self.conv = DeformableConvV2(
in_channels=ch_in,
out_channels=ch_out,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(
initializer=initializer, learning_rate=1.),
bias_attr=True,
lr_scale=dcn_lr_scale,
regularizer=dcn_regularizer,
dcn_bias_regularizer=dcn_regularizer,
dcn_bias_lr_scale=dcn_lr_scale,
skip_quant=skip_quant)
norm_lr = 0. if freeze_norm else 1.
param_attr = ParamAttr(
learning_rate=norm_lr,
regularizer=L2Decay(norm_decay) if norm_decay is not None else None)
bias_attr = ParamAttr(
learning_rate=norm_lr,
regularizer=L2Decay(norm_decay) if norm_decay is not None else None)
if norm_type in ['bn', 'sync_bn']:
self.norm = nn.BatchNorm2D(
ch_out, weight_attr=param_attr, bias_attr=bias_attr)
elif norm_type == 'gn':
self.norm = nn.GroupNorm(
num_groups=norm_groups,
num_channels=ch_out,
weight_attr=param_attr,
bias_attr=bias_attr)
else:
self.norm = None
def forward(self, inputs):
out = self.conv(inputs)
if self.norm is not None:
out = self.norm(out)
return out
class LiteConv(nn.Layer):
def __init__(self,
in_channels,
out_channels,
stride=1,
with_act=True,
norm_type='sync_bn',
name=None):
super(LiteConv, self).__init__()
self.lite_conv = nn.Sequential()
conv1 = ConvNormLayer(
in_channels,
in_channels,
filter_size=5,
stride=stride,
groups=in_channels,
norm_type=norm_type,
initializer=XavierUniform())
conv2 = ConvNormLayer(
in_channels,
out_channels,
filter_size=1,
stride=stride,
norm_type=norm_type,
initializer=XavierUniform())
conv3 = ConvNormLayer(
out_channels,
out_channels,
filter_size=1,
stride=stride,
norm_type=norm_type,
initializer=XavierUniform())
conv4 = ConvNormLayer(
out_channels,
out_channels,
filter_size=5,
stride=stride,
groups=out_channels,
norm_type=norm_type,
initializer=XavierUniform())
conv_list = [conv1, conv2, conv3, conv4]
self.lite_conv.add_sublayer('conv1', conv1)
self.lite_conv.add_sublayer('relu6_1', nn.ReLU6())
self.lite_conv.add_sublayer('conv2', conv2)
if with_act:
self.lite_conv.add_sublayer('relu6_2', nn.ReLU6())
self.lite_conv.add_sublayer('conv3', conv3)
self.lite_conv.add_sublayer('relu6_3', nn.ReLU6())
self.lite_conv.add_sublayer('conv4', conv4)
if with_act:
self.lite_conv.add_sublayer('relu6_4', nn.ReLU6())
def forward(self, inputs):
out = self.lite_conv(inputs)
return out
class DropBlock(nn.Layer):
def __init__(self, block_size, keep_prob, name=None, data_format='NCHW'):
"""
DropBlock layer, see https://arxiv.org/abs/1810.12890
Args:
block_size (int): block size
            keep_prob (float): keep probability
name (str): layer name
data_format (str): data format, NCHW or NHWC
"""
super(DropBlock, self).__init__()
self.block_size = block_size
self.keep_prob = keep_prob
self.name = name
self.data_format = data_format
def forward(self, x):
if not self.training or self.keep_prob == 1:
return x
else:
gamma = (1. - self.keep_prob) / (self.block_size**2)
if self.data_format == 'NCHW':
shape = x.shape[2:]
else:
shape = x.shape[1:3]
for s in shape:
gamma *= s / (s - self.block_size + 1)
matrix = paddle.cast(paddle.rand(x.shape) < gamma, x.dtype)
mask_inv = F.max_pool2d(
matrix,
self.block_size,
stride=1,
padding=self.block_size // 2,
data_format=self.data_format)
mask = 1. - mask_inv
mask = mask.astype('float32')
x = x.astype('float32')
y = x * mask * (mask.numel() / mask.sum())
return y
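# Hedged sketch (not part of the original file): reproduces the seed
# probability `gamma` used by DropBlock above. gamma is scaled up so that,
# after each seed grows into a block_size x block_size region, roughly
# (1 - keep_prob) of the activations end up dropped. Feature size is illustrative.
def _dropblock_gamma_sketch(keep_prob=0.9, block_size=3, feat_h=32, feat_w=32):
    gamma = (1. - keep_prob) / (block_size**2)
    for s in (feat_h, feat_w):
        gamma *= s / (s - block_size + 1)
    return gamma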
@register
@serializable
class AnchorGeneratorSSD(object):
def __init__(self,
steps=[8, 16, 32, 64, 100, 300],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
min_ratio=15,
max_ratio=90,
base_size=300,
min_sizes=[30.0, 60.0, 111.0, 162.0, 213.0, 264.0],
max_sizes=[60.0, 111.0, 162.0, 213.0, 264.0, 315.0],
offset=0.5,
flip=True,
clip=False,
min_max_aspect_ratios_order=False):
self.steps = steps
self.aspect_ratios = aspect_ratios
self.min_ratio = min_ratio
self.max_ratio = max_ratio
self.base_size = base_size
self.min_sizes = min_sizes
self.max_sizes = max_sizes
self.offset = offset
self.flip = flip
self.clip = clip
self.min_max_aspect_ratios_order = min_max_aspect_ratios_order
if self.min_sizes == [] and self.max_sizes == []:
num_layer = len(aspect_ratios)
step = int(
math.floor(((self.max_ratio - self.min_ratio)) / (num_layer - 2
)))
for ratio in six.moves.range(self.min_ratio, self.max_ratio + 1,
step):
self.min_sizes.append(self.base_size * ratio / 100.)
self.max_sizes.append(self.base_size * (ratio + step) / 100.)
self.min_sizes = [self.base_size * .10] + self.min_sizes
self.max_sizes = [self.base_size * .20] + self.max_sizes
self.num_priors = []
for aspect_ratio, min_size, max_size in zip(
aspect_ratios, self.min_sizes, self.max_sizes):
if isinstance(min_size, (list, tuple)):
self.num_priors.append(
len(_to_list(min_size)) + len(_to_list(max_size)))
else:
self.num_priors.append((len(aspect_ratio) * 2 + 1) * len(
_to_list(min_size)) + len(_to_list(max_size)))
def __call__(self, inputs, image):
boxes = []
for input, min_size, max_size, aspect_ratio, step in zip(
inputs, self.min_sizes, self.max_sizes, self.aspect_ratios,
self.steps):
box, _ = ops.prior_box(
input=input,
image=image,
min_sizes=_to_list(min_size),
max_sizes=_to_list(max_size),
aspect_ratios=aspect_ratio,
flip=self.flip,
clip=self.clip,
steps=[step, step],
offset=self.offset,
min_max_aspect_ratios_order=self.min_max_aspect_ratios_order)
boxes.append(paddle.reshape(box, [-1, 4]))
return boxes
@register
@serializable
class RCNNBox(object):
__shared__ = ['num_classes', 'export_onnx']
def __init__(self,
prior_box_var=[10., 10., 5., 5.],
code_type="decode_center_size",
box_normalized=False,
num_classes=80,
export_onnx=False):
super(RCNNBox, self).__init__()
self.prior_box_var = prior_box_var
self.code_type = code_type
self.box_normalized = box_normalized
self.num_classes = num_classes
self.export_onnx = export_onnx
def __call__(self, bbox_head_out, rois, im_shape, scale_factor):
bbox_pred = bbox_head_out[0]
cls_prob = bbox_head_out[1]
roi = rois[0]
rois_num = rois[1]
if self.export_onnx:
onnx_rois_num_per_im = rois_num[0]
origin_shape = paddle.expand(im_shape[0, :],
[onnx_rois_num_per_im, 2])
else:
origin_shape_list = []
if isinstance(roi, list):
batch_size = len(roi)
else:
batch_size = paddle.slice(paddle.shape(im_shape), [0], [0], [1])
# bbox_pred.shape: [N, C*4]
for idx in range(batch_size):
rois_num_per_im = rois_num[idx]
expand_im_shape = paddle.expand(im_shape[idx, :],
[rois_num_per_im, 2])
origin_shape_list.append(expand_im_shape)
origin_shape = paddle.concat(origin_shape_list)
# bbox_pred.shape: [N, C*4]
# C=num_classes in faster/mask rcnn(bbox_head), C=1 in cascade rcnn(cascade_head)
bbox = paddle.concat(roi)
bbox = delta2bbox(bbox_pred, bbox, self.prior_box_var)
scores = cls_prob[:, :-1]
# bbox.shape: [N, C, 4]
# bbox.shape[1] must be equal to scores.shape[1]
total_num = bbox.shape[0]
bbox_dim = bbox.shape[-1]
bbox = paddle.expand(bbox, [total_num, self.num_classes, bbox_dim])
origin_h = paddle.unsqueeze(origin_shape[:, 0], axis=1)
origin_w = paddle.unsqueeze(origin_shape[:, 1], axis=1)
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bbox[:, :, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(bbox[:, :, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(bbox[:, :, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(bbox[:, :, 3], origin_h), zeros)
bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
bboxes = (bbox, rois_num)
return bboxes, scores
@register
@serializable
class MultiClassNMS(object):
def __init__(self,
score_threshold=.05,
nms_top_k=-1,
keep_top_k=100,
nms_threshold=.5,
normalized=True,
nms_eta=1.0,
return_index=False,
return_rois_num=True,
trt=False):
super(MultiClassNMS, self).__init__()
self.score_threshold = score_threshold
self.nms_top_k = nms_top_k
self.keep_top_k = keep_top_k
self.nms_threshold = nms_threshold
self.normalized = normalized
self.nms_eta = nms_eta
self.return_index = return_index
self.return_rois_num = return_rois_num
self.trt = trt
def __call__(self, bboxes, score, background_label=-1):
"""
bboxes (Tensor|List[Tensor]): 1. (Tensor) Predicted bboxes with shape
[N, M, 4], N is the batch size and M
is the number of bboxes
2. (List[Tensor]) bboxes and bbox_num,
bboxes have shape of [M, C, 4], C
is the class number and bbox_num means
the number of bboxes of each batch with
shape [N,]
score (Tensor): Predicted scores with shape [N, C, M] or [M, C]
            background_label (int): Ignore the background label; for example, it
                                    is num_classes for RCNN and -1 for YOLO.
"""
kwargs = self.__dict__.copy()
if isinstance(bboxes, tuple):
bboxes, bbox_num = bboxes
kwargs.update({'rois_num': bbox_num})
if background_label > -1:
kwargs.update({'background_label': background_label})
kwargs.pop('trt')
# TODO(wangxinxin08): paddle version should be develop or 2.3 and above to run nms on tensorrt
if self.trt and (int(paddle.version.major) == 0 or
(int(paddle.version.major) >= 2 and
int(paddle.version.minor) >= 3)):
# TODO(wangxinxin08): tricky switch to run nms on tensorrt
kwargs.update({'nms_eta': 1.1})
bbox, bbox_num, _ = ops.multiclass_nms(bboxes, score, **kwargs)
bbox = bbox.reshape([1, -1, 6])
idx = paddle.nonzero(bbox[..., 0] != -1)
bbox = paddle.gather_nd(bbox, idx)
return bbox, bbox_num, None
else:
return ops.multiclass_nms(bboxes, score, **kwargs)
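# Hedged usage sketch (not part of the original file, assumes a working paddle
# with the multiclass_nms op): boxes are passed as [N, M, 4] and scores as
# [N, C, M], matching the __call__ docstring above. All values are random and
# purely illustrative.
def _multiclass_nms_sketch():
    nms = MultiClassNMS(score_threshold=0.05, keep_top_k=10, nms_threshold=0.5)
    xy = paddle.rand([1, 50, 2]) * 300.
    wh = paddle.rand([1, 50, 2]) * 20. + 1.
    bboxes = paddle.concat([xy, xy + wh], axis=-1)
    scores = paddle.rand([1, 80, 50])
    return nms(bboxes, scores)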
@register
@serializable
class MatrixNMS(object):
__append_doc__ = True
def __init__(self,
score_threshold=.05,
post_threshold=.05,
nms_top_k=-1,
keep_top_k=100,
use_gaussian=False,
gaussian_sigma=2.,
normalized=False,
background_label=0):
super(MatrixNMS, self).__init__()
self.score_threshold = score_threshold
self.post_threshold = post_threshold
self.nms_top_k = nms_top_k
self.keep_top_k = keep_top_k
self.normalized = normalized
self.use_gaussian = use_gaussian
self.gaussian_sigma = gaussian_sigma
self.background_label = background_label
def __call__(self, bbox, score, *args):
return ops.matrix_nms(
bboxes=bbox,
scores=score,
score_threshold=self.score_threshold,
post_threshold=self.post_threshold,
nms_top_k=self.nms_top_k,
keep_top_k=self.keep_top_k,
use_gaussian=self.use_gaussian,
gaussian_sigma=self.gaussian_sigma,
background_label=self.background_label,
normalized=self.normalized)
@register
@serializable
class YOLOBox(object):
__shared__ = ['num_classes']
def __init__(self,
num_classes=80,
conf_thresh=0.005,
downsample_ratio=32,
clip_bbox=True,
scale_x_y=1.):
self.num_classes = num_classes
self.conf_thresh = conf_thresh
self.downsample_ratio = downsample_ratio
self.clip_bbox = clip_bbox
self.scale_x_y = scale_x_y
def __call__(self,
yolo_head_out,
anchors,
im_shape,
scale_factor,
var_weight=None):
boxes_list = []
scores_list = []
origin_shape = im_shape / scale_factor
origin_shape = paddle.cast(origin_shape, 'int32')
for i, head_out in enumerate(yolo_head_out):
boxes, scores = paddle.vision.ops.yolo_box(
head_out,
origin_shape,
anchors[i],
self.num_classes,
self.conf_thresh,
self.downsample_ratio // 2**i,
self.clip_bbox,
scale_x_y=self.scale_x_y)
boxes_list.append(boxes)
scores_list.append(paddle.transpose(scores, perm=[0, 2, 1]))
yolo_boxes = paddle.concat(boxes_list, axis=1)
yolo_scores = paddle.concat(scores_list, axis=2)
return yolo_boxes, yolo_scores
@register
@serializable
class SSDBox(object):
def __init__(self,
is_normalized=True,
prior_box_var=[0.1, 0.1, 0.2, 0.2],
use_fuse_decode=False):
self.is_normalized = is_normalized
self.norm_delta = float(not self.is_normalized)
self.prior_box_var = prior_box_var
self.use_fuse_decode = use_fuse_decode
def __call__(self,
preds,
prior_boxes,
im_shape,
scale_factor,
var_weight=None):
boxes, scores = preds
boxes = paddle.concat(boxes, axis=1)
prior_boxes = paddle.concat(prior_boxes)
if self.use_fuse_decode:
output_boxes = ops.box_coder(
prior_boxes,
self.prior_box_var,
boxes,
code_type="decode_center_size",
box_normalized=self.is_normalized)
else:
pb_w = prior_boxes[:, 2] - prior_boxes[:, 0] + self.norm_delta
pb_h = prior_boxes[:, 3] - prior_boxes[:, 1] + self.norm_delta
pb_x = prior_boxes[:, 0] + pb_w * 0.5
pb_y = prior_boxes[:, 1] + pb_h * 0.5
out_x = pb_x + boxes[:, :, 0] * pb_w * self.prior_box_var[0]
out_y = pb_y + boxes[:, :, 1] * pb_h * self.prior_box_var[1]
out_w = paddle.exp(boxes[:, :, 2] * self.prior_box_var[2]) * pb_w
out_h = paddle.exp(boxes[:, :, 3] * self.prior_box_var[3]) * pb_h
output_boxes = paddle.stack(
[
out_x - out_w / 2., out_y - out_h / 2., out_x + out_w / 2.,
out_y + out_h / 2.
],
axis=-1)
if self.is_normalized:
h = (im_shape[:, 0] / scale_factor[:, 0]).unsqueeze(-1)
w = (im_shape[:, 1] / scale_factor[:, 1]).unsqueeze(-1)
im_shape = paddle.stack([w, h, w, h], axis=-1)
output_boxes *= im_shape
else:
output_boxes[..., -2:] -= 1.0
output_scores = F.softmax(paddle.concat(
scores, axis=1)).transpose([0, 2, 1])
return output_boxes, output_scores
@register
class TTFBox(object):
__shared__ = ['down_ratio']
def __init__(self, max_per_img=100, score_thresh=0.01, down_ratio=4):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.score_thresh = score_thresh
self.down_ratio = down_ratio
def _simple_nms(self, heat, kernel=3):
"""
Use maxpool to filter the max score, get local peaks.
"""
pad = (kernel - 1) // 2
hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
keep = paddle.cast(hmax == heat, 'float32')
return heat * keep
def _topk(self, scores):
"""
Select top k scores and decode to get xy coordinates.
"""
k = self.max_per_img
shape_fm = paddle.shape(scores)
shape_fm.stop_gradient = True
cat, height, width = shape_fm[1], shape_fm[2], shape_fm[3]
# batch size is 1
scores_r = paddle.reshape(scores, [cat, -1])
topk_scores, topk_inds = paddle.topk(scores_r, k)
topk_ys = topk_inds // width
topk_xs = topk_inds % width
topk_score_r = paddle.reshape(topk_scores, [-1])
topk_score, topk_ind = paddle.topk(topk_score_r, k)
k_t = paddle.full(paddle.shape(topk_ind), k, dtype='int64')
topk_clses = paddle.cast(paddle.floor_divide(topk_ind, k_t), 'float32')
topk_inds = paddle.reshape(topk_inds, [-1])
topk_ys = paddle.reshape(topk_ys, [-1, 1])
topk_xs = paddle.reshape(topk_xs, [-1, 1])
topk_inds = paddle.gather(topk_inds, topk_ind)
topk_ys = paddle.gather(topk_ys, topk_ind)
topk_xs = paddle.gather(topk_xs, topk_ind)
return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
def _decode(self, hm, wh, im_shape, scale_factor):
heatmap = F.sigmoid(hm)
heat = self._simple_nms(heatmap)
scores, inds, clses, ys, xs = self._topk(heat)
ys = paddle.cast(ys, 'float32') * self.down_ratio
xs = paddle.cast(xs, 'float32') * self.down_ratio
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(clses, [1])
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
results = paddle.concat([clses, scores, bboxes], axis=1)
        # hack: append a result with cls=-1 and score=1. to avoid the case where
        # all scores are less than score_thresh, which may cause an error in gather.
fill_r = paddle.to_tensor(np.array([[-1, 1, 0, 0, 0, 0]]))
fill_r = paddle.cast(fill_r, results.dtype)
results = paddle.concat([results, fill_r])
scores = results[:, 1]
valid_ind = paddle.nonzero(scores > self.score_thresh)
results = paddle.gather(results, valid_ind)
return results, paddle.shape(results)[0:1]
def __call__(self, hm, wh, im_shape, scale_factor):
results = []
results_num = []
for i in range(scale_factor.shape[0]):
result, num = self._decode(hm[i:i + 1, ], wh[i:i + 1, ],
im_shape[i:i + 1, ],
scale_factor[i:i + 1, ])
results.append(result)
results_num.append(num)
results = paddle.concat(results, axis=0)
results_num = paddle.concat(results_num, axis=0)
return results, results_num
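# Hedged sketch (not part of the original file): a tiny numeric example of the
# max-pool trick used in TTFBox._simple_nms above; only values that are the
# maximum of their 3x3 neighbourhood survive. The heatmap values are illustrative.
def _simple_nms_peak_sketch():
    heat = paddle.to_tensor([[[[0.1, 0.9, 0.2],
                               [0.3, 0.4, 0.8],
                               [0.0, 0.5, 0.6]]]])
    hmax = F.max_pool2d(heat, 3, stride=1, padding=1)
    peaks = heat * paddle.cast(hmax == heat, 'float32')
    return peaks  # only the 0.9 entry remains non-zero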
@register
@serializable
class JDEBox(object):
__shared__ = ['num_classes']
def __init__(self, num_classes=1, conf_thresh=0.3, downsample_ratio=32):
self.num_classes = num_classes
self.conf_thresh = conf_thresh
self.downsample_ratio = downsample_ratio
def generate_anchor(self, nGh, nGw, anchor_wh):
nA = len(anchor_wh)
yv, xv = paddle.meshgrid([paddle.arange(nGh), paddle.arange(nGw)])
mesh = paddle.stack(
(xv, yv), axis=0).cast(dtype='float32') # 2 x nGh x nGw
meshs = paddle.tile(mesh, [nA, 1, 1, 1])
anchor_offset_mesh = anchor_wh[:, :, None][:, :, :, None].repeat(
int(nGh), axis=-2).repeat(
int(nGw), axis=-1)
anchor_offset_mesh = paddle.to_tensor(
anchor_offset_mesh.astype(np.float32))
# nA x 2 x nGh x nGw
anchor_mesh = paddle.concat([meshs, anchor_offset_mesh], axis=1)
anchor_mesh = paddle.transpose(anchor_mesh,
[0, 2, 3, 1]) # (nA x nGh x nGw) x 4
return anchor_mesh
def decode_delta(self, delta, fg_anchor_list):
px, py, pw, ph = fg_anchor_list[:, 0], fg_anchor_list[:,1], \
fg_anchor_list[:, 2], fg_anchor_list[:,3]
dx, dy, dw, dh = delta[:, 0], delta[:, 1], delta[:, 2], delta[:, 3]
gx = pw * dx + px
gy = ph * dy + py
gw = pw * paddle.exp(dw)
gh = ph * paddle.exp(dh)
gx1 = gx - gw * 0.5
gy1 = gy - gh * 0.5
gx2 = gx + gw * 0.5
gy2 = gy + gh * 0.5
return paddle.stack([gx1, gy1, gx2, gy2], axis=1)
def decode_delta_map(self, nA, nGh, nGw, delta_map, anchor_vec):
anchor_mesh = self.generate_anchor(nGh, nGw, anchor_vec)
anchor_mesh = paddle.unsqueeze(anchor_mesh, 0)
pred_list = self.decode_delta(
paddle.reshape(
delta_map, shape=[-1, 4]),
paddle.reshape(
anchor_mesh, shape=[-1, 4]))
pred_map = paddle.reshape(pred_list, shape=[nA * nGh * nGw, 4])
return pred_map
def _postprocessing_by_level(self, nA, stride, head_out, anchor_vec):
boxes_shape = head_out.shape # [nB, nA*6, nGh, nGw]
nGh, nGw = boxes_shape[-2], boxes_shape[-1]
nB = 1 # TODO: only support bs=1 now
boxes_list, scores_list = [], []
for idx in range(nB):
p = paddle.reshape(
head_out[idx], shape=[nA, self.num_classes + 5, nGh, nGw])
p = paddle.transpose(p, perm=[0, 2, 3, 1]) # [nA, nGh, nGw, 6]
delta_map = p[:, :, :, :4]
boxes = self.decode_delta_map(nA, nGh, nGw, delta_map, anchor_vec)
# [nA * nGh * nGw, 4]
boxes_list.append(boxes * stride)
p_conf = paddle.transpose(
p[:, :, :, 4:6], perm=[3, 0, 1, 2]) # [2, nA, nGh, nGw]
p_conf = F.softmax(
p_conf, axis=0)[1, :, :, :].unsqueeze(-1) # [nA, nGh, nGw, 1]
scores = paddle.reshape(p_conf, shape=[nA * nGh * nGw, 1])
scores_list.append(scores)
boxes_results = paddle.stack(boxes_list)
scores_results = paddle.stack(scores_list)
return boxes_results, scores_results
def __call__(self, yolo_head_out, anchors):
bbox_pred_list = []
for i, head_out in enumerate(yolo_head_out):
stride = self.downsample_ratio // 2**i
anc_w, anc_h = anchors[i][0::2], anchors[i][1::2]
anchor_vec = np.stack((anc_w, anc_h), axis=1) / stride
nA = len(anc_w)
boxes, scores = self._postprocessing_by_level(nA, stride, head_out,
anchor_vec)
bbox_pred_list.append(paddle.concat([boxes, scores], axis=-1))
yolo_boxes_scores = paddle.concat(bbox_pred_list, axis=1)
boxes_idx_over_conf_thr = paddle.nonzero(
yolo_boxes_scores[:, :, -1] > self.conf_thresh)
boxes_idx_over_conf_thr.stop_gradient = True
return boxes_idx_over_conf_thr, yolo_boxes_scores
@register
@serializable
class MaskMatrixNMS(object):
"""
Matrix NMS for multi-class masks.
Args:
        update_threshold (float): Updated threshold of category score in the second round.
        pre_nms_top_n (int): Number of total instances to be kept per image before NMS.
        post_nms_top_n (int): Number of total instances to be kept per image after NMS.
kernel (str): 'linear' or 'gaussian'.
sigma (float): std in gaussian method.
Input:
seg_preds (Variable): shape (n, h, w), segmentation feature maps
seg_masks (Variable): shape (n, h, w), segmentation feature maps
cate_labels (Variable): shape (n), mask labels in descending order
cate_scores (Variable): shape (n), mask scores in descending order
sum_masks (Variable): a float tensor of the sum of seg_masks
Returns:
        tuple: seg_preds, cate_scores and cate_labels of the instances kept after NMS.
"""
def __init__(self,
update_threshold=0.05,
pre_nms_top_n=500,
post_nms_top_n=100,
kernel='gaussian',
sigma=2.0):
super(MaskMatrixNMS, self).__init__()
self.update_threshold = update_threshold
self.pre_nms_top_n = pre_nms_top_n
self.post_nms_top_n = post_nms_top_n
self.kernel = kernel
self.sigma = sigma
def _sort_score(self, scores, top_num):
if paddle.shape(scores)[0] > top_num:
return paddle.topk(scores, top_num)[1]
else:
return paddle.argsort(scores, descending=True)
def __call__(self,
seg_preds,
seg_masks,
cate_labels,
cate_scores,
sum_masks=None):
# sort and keep top nms_pre
sort_inds = self._sort_score(cate_scores, self.pre_nms_top_n)
seg_masks = paddle.gather(seg_masks, index=sort_inds)
seg_preds = paddle.gather(seg_preds, index=sort_inds)
sum_masks = paddle.gather(sum_masks, index=sort_inds)
cate_scores = paddle.gather(cate_scores, index=sort_inds)
cate_labels = paddle.gather(cate_labels, index=sort_inds)
seg_masks = paddle.flatten(seg_masks, start_axis=1, stop_axis=-1)
# inter.
inter_matrix = paddle.mm(seg_masks, paddle.transpose(seg_masks, [1, 0]))
n_samples = paddle.shape(cate_labels)
# union.
sum_masks_x = paddle.expand(sum_masks, shape=[n_samples, n_samples])
# iou.
iou_matrix = (inter_matrix / (
sum_masks_x + paddle.transpose(sum_masks_x, [1, 0]) - inter_matrix))
iou_matrix = paddle.triu(iou_matrix, diagonal=1)
# label_specific matrix.
cate_labels_x = paddle.expand(cate_labels, shape=[n_samples, n_samples])
label_matrix = paddle.cast(
(cate_labels_x == paddle.transpose(cate_labels_x, [1, 0])),
'float32')
label_matrix = paddle.triu(label_matrix, diagonal=1)
# IoU compensation
compensate_iou = paddle.max((iou_matrix * label_matrix), axis=0)
compensate_iou = paddle.expand(
compensate_iou, shape=[n_samples, n_samples])
compensate_iou = paddle.transpose(compensate_iou, [1, 0])
# IoU decay
decay_iou = iou_matrix * label_matrix
# matrix nms
if self.kernel == 'gaussian':
decay_matrix = paddle.exp(-1 * self.sigma * (decay_iou**2))
compensate_matrix = paddle.exp(-1 * self.sigma *
(compensate_iou**2))
decay_coefficient = paddle.min(decay_matrix / compensate_matrix,
axis=0)
elif self.kernel == 'linear':
decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
decay_coefficient = paddle.min(decay_matrix, axis=0)
else:
raise NotImplementedError
# update the score.
cate_scores = cate_scores * decay_coefficient
y = paddle.zeros(shape=paddle.shape(cate_scores), dtype='float32')
keep = paddle.where(cate_scores >= self.update_threshold, cate_scores,
y)
keep = paddle.nonzero(keep)
keep = paddle.squeeze(keep, axis=[1])
# Prevent empty and increase fake data
keep = paddle.concat(
[keep, paddle.cast(paddle.shape(cate_scores)[0:1] - 1, 'int64')])
seg_preds = paddle.gather(seg_preds, index=keep)
cate_scores = paddle.gather(cate_scores, index=keep)
cate_labels = paddle.gather(cate_labels, index=keep)
# sort and keep top_k
sort_inds = self._sort_score(cate_scores, self.post_nms_top_n)
seg_preds = paddle.gather(seg_preds, index=sort_inds)
cate_scores = paddle.gather(cate_scores, index=sort_inds)
cate_labels = paddle.gather(cate_labels, index=sort_inds)
return seg_preds, cate_scores, cate_labels
def Conv2d(in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
weight_init=Normal(std=0.001),
bias_init=Constant(0.)):
weight_attr = paddle.framework.ParamAttr(initializer=weight_init)
if bias:
bias_attr = paddle.framework.ParamAttr(initializer=bias_init)
else:
bias_attr = False
conv = nn.Conv2D(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
weight_attr=weight_attr,
bias_attr=bias_attr)
return conv
def ConvTranspose2d(in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
weight_init=Normal(std=0.001),
bias_init=Constant(0.)):
weight_attr = paddle.framework.ParamAttr(initializer=weight_init)
if bias:
bias_attr = paddle.framework.ParamAttr(initializer=bias_init)
else:
bias_attr = False
conv = nn.Conv2DTranspose(
in_channels,
out_channels,
kernel_size,
stride,
padding,
output_padding,
dilation,
groups,
weight_attr=weight_attr,
bias_attr=bias_attr)
return conv
def BatchNorm2d(num_features, eps=1e-05, momentum=0.9, affine=True):
if not affine:
weight_attr = False
bias_attr = False
else:
weight_attr = None
bias_attr = None
batchnorm = nn.BatchNorm2D(
num_features,
momentum,
eps,
weight_attr=weight_attr,
bias_attr=bias_attr)
return batchnorm
def ReLU():
return nn.ReLU()
def Upsample(scale_factor=None, mode='nearest', align_corners=False):
return nn.Upsample(None, scale_factor, mode, align_corners)
def MaxPool(kernel_size, stride, padding, ceil_mode=False):
return nn.MaxPool2D(kernel_size, stride, padding, ceil_mode=ceil_mode)
class Concat(nn.Layer):
def __init__(self, dim=0):
super(Concat, self).__init__()
self.dim = dim
def forward(self, inputs):
return paddle.concat(inputs, axis=self.dim)
def extra_repr(self):
return 'dim={}'.format(self.dim)
def _convert_attention_mask(attn_mask, dtype):
"""
Convert the attention mask to the target dtype we expect.
Parameters:
attn_mask (Tensor, optional): A tensor used in multi-head attention
            to prevent attention to some unwanted positions, usually the
paddings or the subsequent positions. It is a tensor with shape
broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
When the data type is bool, the unwanted positions have `False`
values and the others have `True` values. When the data type is
int, the unwanted positions have 0 values and the others have 1
values. When the data type is float, the unwanted positions have
`-INF` values and the others have 0 values. It can be None when
            no positions need to be masked. Default None.
dtype (VarType): The target type of `attn_mask` we expect.
Returns:
Tensor: A Tensor with shape same as input `attn_mask`, with data type `dtype`.
"""
return nn.layer.transformer._convert_attention_mask(attn_mask, dtype)
@register
class MultiHeadAttention(nn.Layer):
"""
    Attention maps queries and a set of key-value pairs to outputs, and
    Multi-Head Attention runs several attention heads in parallel to jointly attend
    to information from different representation subspaces.
Please refer to `Attention Is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`_
for more details.
Parameters:
embed_dim (int): The expected feature size in the input and output.
num_heads (int): The number of heads in multi-head attention.
dropout (float, optional): The dropout probability used on attention
weights to drop some attention targets. 0 for no dropout. Default 0
kdim (int, optional): The feature size in key. If None, assumed equal to
`embed_dim`. Default None.
vdim (int, optional): The feature size in value. If None, assumed equal to
`embed_dim`. Default None.
need_weights (bool, optional): Indicate whether to return the attention
weights. Default False.
Examples:
.. code-block:: python
import paddle
# encoder input: [batch_size, sequence_length, d_model]
query = paddle.rand((2, 4, 128))
# self attention mask: [batch_size, num_heads, query_len, query_len]
attn_mask = paddle.rand((2, 2, 4, 4))
multi_head_attn = paddle.nn.MultiHeadAttention(128, 2)
output = multi_head_attn(query, None, None, attn_mask=attn_mask) # [2, 4, 128]
"""
def __init__(self,
embed_dim,
num_heads,
dropout=0.,
kdim=None,
vdim=None,
need_weights=False):
super(MultiHeadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.need_weights = need_weights
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim:
self.in_proj_weight = self.create_parameter(
shape=[embed_dim, 3 * embed_dim],
attr=None,
dtype=self._dtype,
is_bias=False)
self.in_proj_bias = self.create_parameter(
shape=[3 * embed_dim],
attr=None,
dtype=self._dtype,
is_bias=True)
else:
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.k_proj = nn.Linear(self.kdim, embed_dim)
self.v_proj = nn.Linear(self.vdim, embed_dim)
self.out_proj = nn.Linear(embed_dim, embed_dim)
self._type_list = ('q_proj', 'k_proj', 'v_proj')
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
else:
constant_(p)
def compute_qkv(self, tensor, index):
if self._qkv_same_embed_dim:
tensor = F.linear(
x=tensor,
weight=self.in_proj_weight[:, index * self.embed_dim:(index + 1)
* self.embed_dim],
bias=self.in_proj_bias[index * self.embed_dim:(index + 1) *
self.embed_dim]
if self.in_proj_bias is not None else None)
else:
tensor = getattr(self, self._type_list[index])(tensor)
tensor = tensor.reshape(
[0, 0, self.num_heads, self.head_dim]).transpose([0, 2, 1, 3])
return tensor
def forward(self, query, key=None, value=None, attn_mask=None):
r"""
Applies multi-head attention to map queries and a set of key-value pairs
to outputs.
Parameters:
query (Tensor): The queries for multi-head attention. It is a
tensor with shape `[batch_size, query_length, embed_dim]`. The
data type should be float32 or float64.
key (Tensor, optional): The keys for multi-head attention. It is
a tensor with shape `[batch_size, key_length, kdim]`. The
data type should be float32 or float64. If None, use `query` as
`key`. Default None.
value (Tensor, optional): The values for multi-head attention. It
is a tensor with shape `[batch_size, value_length, vdim]`.
The data type should be float32 or float64. If None, use `query` as
`value`. Default None.
attn_mask (Tensor, optional): A tensor used in multi-head attention
                to prevent attention to some unwanted positions, usually the
paddings or the subsequent positions. It is a tensor with shape
broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
When the data type is bool, the unwanted positions have `False`
values and the others have `True` values. When the data type is
int, the unwanted positions have 0 values and the others have 1
values. When the data type is float, the unwanted positions have
`-INF` values and the others have 0 values. It can be None when
                no positions need to be masked. Default None.
Returns:
            Tensor|tuple: A tensor with the same shape and data type as `query`, \
                representing the attention output, or a tuple if `need_weights` \
                is True. In that case the tuple also includes the attention \
                weights tensor shaped `[batch_size, num_heads, query_length, key_length]`.
"""
key = query if key is None else key
value = query if value is None else value
# compute q ,k ,v
q, k, v = (self.compute_qkv(t, i)
for i, t in enumerate([query, key, value]))
# scale dot product attention
product = paddle.matmul(x=q, y=k, transpose_y=True)
scaling = float(self.head_dim)**-0.5
product = product * scaling
if attn_mask is not None:
# Support bool or int mask
attn_mask = _convert_attention_mask(attn_mask, product.dtype)
product = product + attn_mask
weights = F.softmax(product)
if self.dropout:
weights = F.dropout(
weights,
self.dropout,
training=self.training,
mode="upscale_in_train")
out = paddle.matmul(weights, v)
# combine heads
out = paddle.transpose(out, perm=[0, 2, 1, 3])
out = paddle.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])
# project to output
out = self.out_proj(out)
outs = [out]
if self.need_weights:
outs.append(weights)
return out if len(outs) == 1 else tuple(outs)
@register
class ConvMixer(nn.Layer):
def __init__(
self,
dim,
depth,
kernel_size=3, ):
super().__init__()
self.dim = dim
self.depth = depth
self.kernel_size = kernel_size
self.mixer = self.conv_mixer(dim, depth, kernel_size)
def forward(self, x):
return self.mixer(x)
@staticmethod
def conv_mixer(
dim,
depth,
kernel_size, ):
Seq, ActBn = nn.Sequential, lambda x: Seq(x, nn.GELU(), nn.BatchNorm2D(dim))
Residual = type('Residual', (Seq, ),
{'forward': lambda self, x: self[0](x) + x})
return Seq(* [
Seq(Residual(
ActBn(
nn.Conv2D(
dim, dim, kernel_size, groups=dim, padding="same"))),
ActBn(nn.Conv2D(dim, dim, 1))) for i in range(depth)
])
| PaddleDetection/ppdet/modeling/layers.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/layers.py",
"repo_id": "PaddleDetection",
"token_count": 26594
} | 75 |
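A minimal usage sketch for the `MaskMatrixNMS` layer defined above (a hedged example: it assumes `paddle` and `ppdet` are importable, and all shapes, thresholds and scores are illustrative only):
```
import paddle
from ppdet.modeling.layers import MaskMatrixNMS

nms = MaskMatrixNMS(update_threshold=0.05, kernel='gaussian', sigma=2.0)
n, h, w = 8, 32, 32
seg_preds = paddle.rand([n, h, w])                             # soft mask predictions
seg_masks = paddle.cast(seg_preds > 0.5, 'float32')            # binarized masks
cate_labels = paddle.randint(0, 3, shape=[n])                  # per-instance class ids
cate_scores = paddle.sort(paddle.rand([n]), descending=True)   # descending, as documented
sum_masks = seg_masks.sum(axis=[1, 2])
seg_out, score_out, label_out = nms(
    seg_preds, seg_masks, cate_labels, cate_scores, sum_masks=sum_masks)
print(score_out.shape, label_out.shape)                        # rescored, filtered instances
```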
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
import paddle.nn.functional as F
from ppdet.core.workspace import register, serializable
__all__ = ['ProbIoULoss']
def gbb_form(boxes):
xy, wh, angle = paddle.split(boxes, [2, 2, 1], axis=-1)
return paddle.concat([xy, wh.pow(2) / 12., angle], axis=-1)
def rotated_form(a_, b_, angles):
cos_a = paddle.cos(angles)
sin_a = paddle.sin(angles)
a = a_ * paddle.pow(cos_a, 2) + b_ * paddle.pow(sin_a, 2)
b = a_ * paddle.pow(sin_a, 2) + b_ * paddle.pow(cos_a, 2)
c = (a_ - b_) * cos_a * sin_a
return a, b, c
def probiou_loss(pred, target, eps=1e-3, mode='l1'):
"""
    pred -> a matrix [N,5](x,y,w,h,angle - in radians) containing our predicted boxes; in case of HBB angle == 0
    target -> a matrix [N,5](x,y,w,h,angle - in radians) containing our target boxes; in case of HBB angle == 0
    eps -> threshold to avoid infinite values
    mode -> ('l1' in [0,1] or 'l2' in [0,inf]) metric according to our paper
"""
gbboxes1 = gbb_form(pred)
gbboxes2 = gbb_form(target)
    x1, y1, a1_, b1_, c1_ = gbboxes1[:, 0], gbboxes1[:, 1], gbboxes1[:, 2], \
        gbboxes1[:, 3], gbboxes1[:, 4]
    x2, y2, a2_, b2_, c2_ = gbboxes2[:, 0], gbboxes2[:, 1], gbboxes2[:, 2], \
        gbboxes2[:, 3], gbboxes2[:, 4]
a1, b1, c1 = rotated_form(a1_, b1_, c1_)
a2, b2, c2 = rotated_form(a2_, b2_, c2_)
t1 = 0.25 * ((a1 + a2) * (paddle.pow(y1 - y2, 2)) + (b1 + b2) * (paddle.pow(x1 - x2, 2))) + \
0.5 * ((c1+c2)*(x2-x1)*(y1-y2))
t2 = (a1 + a2) * (b1 + b2) - paddle.pow(c1 + c2, 2)
t3_ = (a1 * b1 - c1 * c1) * (a2 * b2 - c2 * c2)
t3 = 0.5 * paddle.log(t2 / (4 * paddle.sqrt(F.relu(t3_)) + eps))
B_d = (t1 / t2) + t3
# B_d = t1 + t2 + t3
B_d = paddle.clip(B_d, min=eps, max=100.0)
l1 = paddle.sqrt(1.0 - paddle.exp(-B_d) + eps)
l_i = paddle.pow(l1, 2.0)
l2 = -paddle.log(1.0 - l_i + eps)
if mode == 'l1':
probiou = l1
if mode == 'l2':
probiou = l2
return probiou
@serializable
@register
class ProbIoULoss(object):
""" ProbIoU Loss, refer to https://arxiv.org/abs/2106.06072 for details """
def __init__(self, mode='l1', eps=1e-3):
super(ProbIoULoss, self).__init__()
self.mode = mode
self.eps = eps
def __call__(self, pred_rboxes, assigned_rboxes):
return probiou_loss(pred_rboxes, assigned_rboxes, self.eps, self.mode)
| PaddleDetection/ppdet/modeling/losses/probiou_loss.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/losses/probiou_loss.py",
"repo_id": "PaddleDetection",
"token_count": 2050
} | 76 |
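A small sanity-check sketch for `ProbIoULoss` above (hedged: it assumes `paddle` and `ppdet` are importable; boxes are `[x, y, w, h, angle]` with the angle in radians, and the numbers are made up):
```
import paddle
from ppdet.modeling.losses.probiou_loss import ProbIoULoss

loss_fn = ProbIoULoss(mode='l1')
target = paddle.to_tensor([[50., 50., 20., 10., 0.3]])
pred_same = target.clone()                                 # identical box
pred_shifted = paddle.to_tensor([[58., 53., 20., 10., 0.5]])
print(float(loss_fn(pred_same, target).mean()))            # close to 0
print(float(loss_fn(pred_shifted, target).mean()))         # noticeably larger
```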
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
this code is based on mmdet: git@github.com:open-mmlab/mmdetection.git
"""
import paddle.nn as nn
from ppdet.core.workspace import register, serializable
from ..backbones.hrnet import ConvNormLayer
from ..shape_spec import ShapeSpec
from ..initializer import xavier_uniform_, constant_
__all__ = ['ChannelMapper']
@register
@serializable
class ChannelMapper(nn.Layer):
"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
act_cfg (dict, optional): Config dict for activation layer in
ConvModule. Default: dict(type='ReLU').
num_outs (int, optional): Number of output feature maps. There
would be extra_convs when num_outs larger than the length
of in_channels.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
norm_type="gn",
norm_groups=32,
act='relu',
num_outs=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(ChannelMapper, self).__init__()
assert isinstance(in_channels, list)
self.extra_convs = None
if num_outs is None:
num_outs = len(in_channels)
self.convs = nn.LayerList()
for in_channel in in_channels:
self.convs.append(
ConvNormLayer(
ch_in=in_channel,
ch_out=out_channels,
filter_size=kernel_size,
norm_type='gn',
norm_groups=32,
act=act))
if num_outs > len(in_channels):
self.extra_convs = nn.LayerList()
for i in range(len(in_channels), num_outs):
if i == len(in_channels):
in_channel = in_channels[-1]
else:
in_channel = out_channels
self.extra_convs.append(
ConvNormLayer(
ch_in=in_channel,
ch_out=out_channels,
filter_size=3,
stride=2,
norm_type='gn',
norm_groups=32,
act=act))
self.init_weights()
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
if self.extra_convs:
for i in range(len(self.extra_convs)):
if i == 0:
outs.append(self.extra_convs[0](inputs[-1]))
else:
outs.append(self.extra_convs[i](outs[-1]))
return tuple(outs)
@property
def out_shape(self):
return [
ShapeSpec(
channels=self.out_channel, stride=1. / s)
for s in self.spatial_scales
]
def init_weights(self):
"""Initialize the transformer weights."""
for p in self.parameters():
if p.rank() > 1:
xavier_uniform_(p)
if hasattr(p, 'bias') and p.bias is not None:
                constant_(p.bias)
| PaddleDetection/ppdet/modeling/necks/channel_mapper.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/necks/channel_mapper.py",
"repo_id": "PaddleDetection",
"token_count": 2193
} | 77 |
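A minimal sketch of how `ChannelMapper` is typically used to align multi-scale backbone features (hedged: it assumes `paddle` and `ppdet` are importable; channel counts and spatial sizes are illustrative):
```
import paddle
from ppdet.modeling.necks.channel_mapper import ChannelMapper

mapper = ChannelMapper(in_channels=[512, 1024, 2048], out_channels=256, num_outs=4)
feats = [paddle.rand([2, c, s, s]) for c, s in zip([512, 1024, 2048], [80, 40, 20])]
outs = mapper(feats)
print([tuple(o.shape) for o in outs])   # four maps, all with 256 channels
```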
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from ppdet.core.workspace import register, serializable
from .. import ops
@register
@serializable
class ProposalGenerator(object):
"""
Proposal generation module
    For more details, please refer to the document of generate_proposals
    in ppdet/modeling/ops.py
Args:
pre_nms_top_n (int): Number of total bboxes to be kept per
image before NMS. default 6000
post_nms_top_n (int): Number of total bboxes to be kept per
image after NMS. default 1000
nms_thresh (float): Threshold in NMS. default 0.5
        min_size (float): Remove predicted boxes with either height or
            width < min_size. default 0.1
        eta (float): Applied in adaptive NMS: if `adaptive_threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration.
            default 1.
topk_after_collect (bool): whether to adopt topk after batch
collection. If topk_after_collect is true, box filter will not be
used after NMS at each image in proposal generation. default false
"""
def __init__(self,
pre_nms_top_n=12000,
post_nms_top_n=2000,
nms_thresh=.5,
min_size=.1,
eta=1.,
topk_after_collect=False):
super(ProposalGenerator, self).__init__()
self.pre_nms_top_n = pre_nms_top_n
self.post_nms_top_n = post_nms_top_n
self.nms_thresh = nms_thresh
self.min_size = min_size
self.eta = eta
self.topk_after_collect = topk_after_collect
def __call__(self, scores, bbox_deltas, anchors, im_shape):
top_n = self.pre_nms_top_n if self.topk_after_collect else self.post_nms_top_n
variances = paddle.ones_like(anchors)
if hasattr(paddle.vision.ops, "generate_proposals"):
generate_proposals = getattr(paddle.vision.ops,
"generate_proposals")
else:
generate_proposals = ops.generate_proposals
rpn_rois, rpn_rois_prob, rpn_rois_num = generate_proposals(
scores,
bbox_deltas,
im_shape,
anchors,
variances,
pre_nms_top_n=self.pre_nms_top_n,
post_nms_top_n=top_n,
nms_thresh=self.nms_thresh,
min_size=self.min_size,
eta=self.eta,
return_rois_num=True)
return rpn_rois, rpn_rois_prob, rpn_rois_num, self.post_nms_top_n
| PaddleDetection/ppdet/modeling/proposal_generator/proposal_generator.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/proposal_generator/proposal_generator.py",
"repo_id": "PaddleDetection",
"token_count": 1433
} | 78 |
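A minimal sketch of calling `ProposalGenerator` for a single RPN level (hedged: it assumes `paddle` and `ppdet` are importable; tensor shapes follow the `paddle.vision.ops.generate_proposals` convention and all values are random):
```
import paddle
from ppdet.modeling.proposal_generator.proposal_generator import ProposalGenerator

gen = ProposalGenerator(pre_nms_top_n=1000, post_nms_top_n=100)
N, A, H, W = 1, 3, 40, 40
scores = paddle.rand([N, A, H, W])                    # objectness per anchor
bbox_deltas = paddle.rand([N, 4 * A, H, W]) * 0.1     # small regression deltas
xy = paddle.rand([H, W, A, 2]) * 600.
anchors = paddle.concat([xy, xy + 32.], axis=-1)      # valid x1y1x2y2 anchors
im_shape = paddle.to_tensor([[640., 640.]])
rois, roi_probs, rois_num, keep_top_n = gen(scores, bbox_deltas, anchors, im_shape)
print(rois.shape, int(rois_num), keep_top_n)
```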
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import random
import numpy as np
import paddle
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 5)))
if parent_path not in sys.path:
sys.path.append(parent_path)
from ppdet.modeling.transformers.utils import deformable_attention_core_func
ms_deform_attn_core_paddle = deformable_attention_core_func
try:
gpu_index = int(sys.argv[1])
except:
gpu_index = 0
print(f'Use gpu {gpu_index} to test...')
paddle.set_device(f'gpu:{gpu_index}')
try:
from deformable_detr_ops import ms_deformable_attn
except Exception as e:
print('import deformable_detr_ops error', e)
sys.exit(-1)
paddle.seed(1)
random.seed(1)
np.random.seed(1)
bs, n_heads, c = 2, 8, 8
query_length, n_levels, n_points = 2, 2, 2
spatial_shapes = paddle.to_tensor([(6, 4), (3, 2)], dtype=paddle.int64)
level_start_index = paddle.concat((paddle.to_tensor(
[0], dtype=paddle.int64), spatial_shapes.prod(1).cumsum(0)[:-1]))
value_length = sum([(H * W).item() for H, W in spatial_shapes])
def get_test_tensors(channels):
value = paddle.rand(
[bs, value_length, n_heads, channels], dtype=paddle.float32) * 0.01
sampling_locations = paddle.rand(
[bs, query_length, n_heads, n_levels, n_points, 2],
dtype=paddle.float32)
attention_weights = paddle.rand(
[bs, query_length, n_heads, n_levels, n_points],
dtype=paddle.float32) + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(
-2, keepdim=True)
return [value, sampling_locations, attention_weights]
@paddle.no_grad()
def check_forward_equal_with_paddle_float():
value, sampling_locations, attention_weights = get_test_tensors(c)
output_paddle = ms_deform_attn_core_paddle(
value, spatial_shapes, level_start_index, sampling_locations,
attention_weights).detach().cpu()
output_cuda = ms_deformable_attn(value, spatial_shapes, level_start_index,
sampling_locations,
attention_weights).detach().cpu()
fwdok = paddle.allclose(
output_cuda, output_paddle, rtol=1e-2, atol=1e-3).item()
max_abs_err = (output_cuda - output_paddle).abs().max().item()
max_rel_err = (
(output_cuda - output_paddle).abs() / output_paddle.abs()).max().item()
print(
f'*{fwdok} check_forward_equal_with_paddle_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}'
)
def check_gradient_numerical(channels=4):
value_paddle, sampling_locations_paddle, attention_weights_paddle = get_test_tensors(
channels)
value_paddle.stop_gradient = False
sampling_locations_paddle.stop_gradient = False
attention_weights_paddle.stop_gradient = False
value_cuda = value_paddle.detach().clone()
sampling_locations_cuda = sampling_locations_paddle.detach().clone()
attention_weights_cuda = attention_weights_paddle.detach().clone()
value_cuda.stop_gradient = False
sampling_locations_cuda.stop_gradient = False
attention_weights_cuda.stop_gradient = False
output_paddle = ms_deform_attn_core_paddle(
value_paddle, spatial_shapes, level_start_index,
sampling_locations_paddle, attention_weights_paddle)
output_paddle.sum().backward()
output_cuda = ms_deformable_attn(value_cuda, spatial_shapes,
level_start_index, sampling_locations_cuda,
attention_weights_cuda)
output_cuda.sum().backward()
res = paddle.allclose(
value_paddle.grad, value_cuda.grad, rtol=1e-2, atol=1e-3).item()
print(f'*tensor1 {res} check_gradient_numerical(D={channels})')
res = paddle.allclose(
sampling_locations_paddle.grad,
sampling_locations_cuda.grad,
rtol=1e-2,
atol=1e-3).item()
print(f'*tensor2 {res} check_gradient_numerical(D={channels})')
res = paddle.allclose(
attention_weights_paddle.grad,
attention_weights_cuda.grad,
rtol=1e-2,
atol=1e-3).item()
print(f'*tensor3 {res} check_gradient_numerical(D={channels})')
if __name__ == '__main__':
check_forward_equal_with_paddle_float()
for channels in [30, 32, 64, 71, 128, 1024, 1025, 2048, 3096]:
check_gradient_numerical(channels)
| PaddleDetection/ppdet/modeling/transformers/ext_op/test_ms_deformable_attn_op.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/transformers/ext_op/test_ms_deformable_attn_op.py",
"repo_id": "PaddleDetection",
"token_count": 2100
} | 79 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
from ppdet.core.workspace import register, create, load_config
from ppdet.utils.checkpoint import load_pretrain_weight
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = [
'DistillModel',
'FGDDistillModel',
'CWDDistillModel',
'LDDistillModel',
'PPYOLOEDistillModel',
]
@register
class DistillModel(nn.Layer):
"""
Build common distill model.
Args:
cfg: The student config.
slim_cfg: The teacher and distill config.
"""
def __init__(self, cfg, slim_cfg):
super(DistillModel, self).__init__()
self.arch = cfg.architecture
self.stu_cfg = cfg
self.student_model = create(self.stu_cfg.architecture)
if 'pretrain_weights' in self.stu_cfg and self.stu_cfg.pretrain_weights:
stu_pretrain = self.stu_cfg.pretrain_weights
else:
stu_pretrain = None
slim_cfg = load_config(slim_cfg)
self.tea_cfg = slim_cfg
self.teacher_model = create(self.tea_cfg.architecture)
if 'pretrain_weights' in self.tea_cfg and self.tea_cfg.pretrain_weights:
tea_pretrain = self.tea_cfg.pretrain_weights
else:
tea_pretrain = None
self.distill_cfg = slim_cfg
# load pretrain weights
self.is_inherit = False
if stu_pretrain:
if self.is_inherit and tea_pretrain:
load_pretrain_weight(self.student_model, tea_pretrain)
logger.debug(
"Inheriting! loading teacher weights to student model!")
load_pretrain_weight(self.student_model, stu_pretrain)
logger.info("Student model has loaded pretrain weights!")
if tea_pretrain:
load_pretrain_weight(self.teacher_model, tea_pretrain)
logger.info("Teacher model has loaded pretrain weights!")
self.teacher_model.eval()
for param in self.teacher_model.parameters():
param.trainable = False
self.distill_loss = self.build_loss(self.distill_cfg)
def build_loss(self, distill_cfg):
if 'distill_loss' in distill_cfg and distill_cfg.distill_loss:
return create(distill_cfg.distill_loss)
else:
return None
def parameters(self):
return self.student_model.parameters()
def forward(self, inputs):
if self.training:
student_loss = self.student_model(inputs)
with paddle.no_grad():
teacher_loss = self.teacher_model(inputs)
loss = self.distill_loss(self.teacher_model, self.student_model)
student_loss['distill_loss'] = loss
student_loss['teacher_loss'] = teacher_loss['loss']
student_loss['loss'] += student_loss['distill_loss']
return student_loss
else:
return self.student_model(inputs)
@register
class FGDDistillModel(DistillModel):
"""
Build FGD distill model.
Args:
cfg: The student config.
slim_cfg: The teacher and distill config.
"""
def __init__(self, cfg, slim_cfg):
super(FGDDistillModel, self).__init__(cfg=cfg, slim_cfg=slim_cfg)
assert self.arch in ['RetinaNet', 'PicoDet'
], 'Unsupported arch: {}'.format(self.arch)
self.is_inherit = True
def build_loss(self, distill_cfg):
assert 'distill_loss_name' in distill_cfg and distill_cfg.distill_loss_name
assert 'distill_loss' in distill_cfg and distill_cfg.distill_loss
loss_func = dict()
name_list = distill_cfg.distill_loss_name
for name in name_list:
loss_func[name] = create(distill_cfg.distill_loss)
return loss_func
def forward(self, inputs):
if self.training:
s_body_feats = self.student_model.backbone(inputs)
s_neck_feats = self.student_model.neck(s_body_feats)
with paddle.no_grad():
t_body_feats = self.teacher_model.backbone(inputs)
t_neck_feats = self.teacher_model.neck(t_body_feats)
loss_dict = {}
for idx, k in enumerate(self.distill_loss):
loss_dict[k] = self.distill_loss[k](s_neck_feats[idx],
t_neck_feats[idx], inputs)
if self.arch == "RetinaNet":
loss = self.student_model.head(s_neck_feats, inputs)
elif self.arch == "PicoDet":
head_outs = self.student_model.head(
s_neck_feats, self.student_model.export_post_process)
loss_gfl = self.student_model.head.get_loss(head_outs, inputs)
total_loss = paddle.add_n(list(loss_gfl.values()))
loss = {}
loss.update(loss_gfl)
loss.update({'loss': total_loss})
else:
raise ValueError(f"Unsupported model {self.arch}")
for k in loss_dict:
loss['loss'] += loss_dict[k]
loss[k] = loss_dict[k]
return loss
else:
body_feats = self.student_model.backbone(inputs)
neck_feats = self.student_model.neck(body_feats)
head_outs = self.student_model.head(neck_feats)
if self.arch == "RetinaNet":
bbox, bbox_num = self.student_model.head.post_process(
head_outs, inputs['im_shape'], inputs['scale_factor'])
return {'bbox': bbox, 'bbox_num': bbox_num}
elif self.arch == "PicoDet":
head_outs = self.student_model.head(
neck_feats, self.student_model.export_post_process)
scale_factor = inputs['scale_factor']
bboxes, bbox_num = self.student_model.head.post_process(
head_outs,
scale_factor,
export_nms=self.student_model.export_nms)
return {'bbox': bboxes, 'bbox_num': bbox_num}
else:
raise ValueError(f"Unsupported model {self.arch}")
@register
class CWDDistillModel(DistillModel):
"""
Build CWD distill model.
Args:
cfg: The student config.
slim_cfg: The teacher and distill config.
"""
def __init__(self, cfg, slim_cfg):
super(CWDDistillModel, self).__init__(cfg=cfg, slim_cfg=slim_cfg)
assert self.arch in ['GFL', 'RetinaNet'], 'Unsupported arch: {}'.format(
self.arch)
def build_loss(self, distill_cfg):
assert 'distill_loss_name' in distill_cfg and distill_cfg.distill_loss_name
assert 'distill_loss' in distill_cfg and distill_cfg.distill_loss
loss_func = dict()
name_list = distill_cfg.distill_loss_name
for name in name_list:
loss_func[name] = create(distill_cfg.distill_loss)
return loss_func
def get_loss_retinanet(self, stu_fea_list, tea_fea_list, inputs):
loss = self.student_model.head(stu_fea_list, inputs)
loss_dict = {}
for idx, k in enumerate(self.distill_loss):
loss_dict[k] = self.distill_loss[k](stu_fea_list[idx],
tea_fea_list[idx])
loss['loss'] += loss_dict[k]
loss[k] = loss_dict[k]
return loss
def get_loss_gfl(self, stu_fea_list, tea_fea_list, inputs):
loss = {}
head_outs = self.student_model.head(stu_fea_list)
loss_gfl = self.student_model.head.get_loss(head_outs, inputs)
loss.update(loss_gfl)
total_loss = paddle.add_n(list(loss.values()))
loss.update({'loss': total_loss})
feat_loss = {}
loss_dict = {}
s_cls_feat, t_cls_feat = [], []
for s_neck_f, t_neck_f in zip(stu_fea_list, tea_fea_list):
conv_cls_feat, _ = self.student_model.head.conv_feat(s_neck_f)
cls_score = self.student_model.head.gfl_head_cls(conv_cls_feat)
t_conv_cls_feat, _ = self.teacher_model.head.conv_feat(t_neck_f)
t_cls_score = self.teacher_model.head.gfl_head_cls(t_conv_cls_feat)
s_cls_feat.append(cls_score)
t_cls_feat.append(t_cls_score)
for idx, k in enumerate(self.distill_loss):
loss_dict[k] = self.distill_loss[k](s_cls_feat[idx],
t_cls_feat[idx])
feat_loss[f"neck_f_{idx}"] = self.distill_loss[k](stu_fea_list[idx],
tea_fea_list[idx])
for k in feat_loss:
loss['loss'] += feat_loss[k]
loss[k] = feat_loss[k]
for k in loss_dict:
loss['loss'] += loss_dict[k]
loss[k] = loss_dict[k]
return loss
def forward(self, inputs):
if self.training:
s_body_feats = self.student_model.backbone(inputs)
s_neck_feats = self.student_model.neck(s_body_feats)
with paddle.no_grad():
t_body_feats = self.teacher_model.backbone(inputs)
t_neck_feats = self.teacher_model.neck(t_body_feats)
if self.arch == "RetinaNet":
loss = self.get_loss_retinanet(s_neck_feats, t_neck_feats,
inputs)
elif self.arch == "GFL":
loss = self.get_loss_gfl(s_neck_feats, t_neck_feats, inputs)
else:
raise ValueError(f"unsupported arch {self.arch}")
return loss
else:
body_feats = self.student_model.backbone(inputs)
neck_feats = self.student_model.neck(body_feats)
head_outs = self.student_model.head(neck_feats)
if self.arch == "RetinaNet":
bbox, bbox_num = self.student_model.head.post_process(
head_outs, inputs['im_shape'], inputs['scale_factor'])
return {'bbox': bbox, 'bbox_num': bbox_num}
elif self.arch == "GFL":
bbox_pred, bbox_num = head_outs
output = {'bbox': bbox_pred, 'bbox_num': bbox_num}
return output
else:
raise ValueError(f"unsupported arch {self.arch}")
@register
class LDDistillModel(DistillModel):
"""
Build LD distill model.
Args:
cfg: The student config.
slim_cfg: The teacher and distill config.
"""
def __init__(self, cfg, slim_cfg):
super(LDDistillModel, self).__init__(cfg=cfg, slim_cfg=slim_cfg)
assert self.arch in ['GFL'], 'Unsupported arch: {}'.format(self.arch)
def forward(self, inputs):
if self.training:
s_body_feats = self.student_model.backbone(inputs)
s_neck_feats = self.student_model.neck(s_body_feats)
s_head_outs = self.student_model.head(s_neck_feats)
with paddle.no_grad():
t_body_feats = self.teacher_model.backbone(inputs)
t_neck_feats = self.teacher_model.neck(t_body_feats)
t_head_outs = self.teacher_model.head(t_neck_feats)
soft_label_list = t_head_outs[0]
soft_targets_list = t_head_outs[1]
student_loss = self.student_model.head.get_loss(
s_head_outs, inputs, soft_label_list, soft_targets_list)
total_loss = paddle.add_n(list(student_loss.values()))
student_loss['loss'] = total_loss
return student_loss
else:
return self.student_model(inputs)
@register
class PPYOLOEDistillModel(DistillModel):
"""
Build PPYOLOE distill model, only used in PPYOLOE
Args:
cfg: The student config.
slim_cfg: The teacher and distill config.
"""
def __init__(self, cfg, slim_cfg):
super(PPYOLOEDistillModel, self).__init__(cfg=cfg, slim_cfg=slim_cfg)
assert self.arch in ['PPYOLOE'], 'Unsupported arch: {}'.format(
self.arch)
def forward(self, inputs, alpha=0.125):
if self.training:
with paddle.no_grad():
teacher_loss = self.teacher_model(inputs)
if hasattr(self.teacher_model.yolo_head, "assigned_labels"):
self.student_model.yolo_head.assigned_labels, self.student_model.yolo_head.assigned_bboxes, self.student_model.yolo_head.assigned_scores = \
self.teacher_model.yolo_head.assigned_labels, self.teacher_model.yolo_head.assigned_bboxes, self.teacher_model.yolo_head.assigned_scores
delattr(self.teacher_model.yolo_head, "assigned_labels")
delattr(self.teacher_model.yolo_head, "assigned_bboxes")
delattr(self.teacher_model.yolo_head, "assigned_scores")
student_loss = self.student_model(inputs)
logits_loss, feat_loss = self.distill_loss(self.teacher_model,
self.student_model)
det_total_loss = student_loss['loss']
total_loss = alpha * (det_total_loss + logits_loss + feat_loss)
student_loss['loss'] = total_loss
student_loss['det_loss'] = det_total_loss
student_loss['logits_loss'] = logits_loss
student_loss['feat_loss'] = feat_loss
return student_loss
else:
return self.student_model(inputs)
| PaddleDetection/ppdet/slim/distill_model.py/0 | {
"file_path": "PaddleDetection/ppdet/slim/distill_model.py",
"repo_id": "PaddleDetection",
"token_count": 7632
} | 80 |
## 1. Environment Preparation
This tutorial covers setting up the runtime environment for the basic function tests under the test_tipc directory.
Recommended environment:
- CUDA 10.1/10.2
- CUDNN 7.6/cudnn8.1
- TensorRT 6.1.0.5 / 7.1 / 7.2
You can either install via a docker image or set up a local Python environment. The docker image is recommended, as it avoids unnecessary environment configuration.
## 2. Docker Image Installation
Docker-based installation is recommended. Create the container with the following command; the current directory is mapped to the `/paddle` directory inside the image.
```
# Start the docker container
nvidia-docker run --name paddle -it -v $PWD:/paddle paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7-gcc82-dev /bin/bash
cd /paddle
```
```
# Build and install Paddle from source
git clone https://github.com/PaddlePaddle/Paddle.git
cd Paddle
mkdir build && cd build
cmake .. \
-DWITH_MKL=ON \
-DWITH_MKLDNN=ON \
-DWITH_GPU=ON \
-DWITH_DISTRIBUTE=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCUDA_ARCH_NAME=Auto \
-DPY_VERSION=3.7 \
-DON_INFER=ON \
-DWITH_TENSORRT=ON \
-DTENSORRT_ROOT=/usr/local/TensorRT6-cuda10.1-cudnn7
make -j 20
pip3.7 install python/dist/paddlepaddle_gpu-0.0.0-cp37-cp37m-linux_x86_64.whl
cd ../../
```
or
```
# Download and install Paddle 2.2
wget https://paddle-inference-lib.bj.bcebos.com/2.2.0/python/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddlepaddle_gpu-2.2.0.post101-cp37-cp37m-linux_x86_64.whl
pip3.7 install paddlepaddle_gpu-2.2.0.post101-cp37-cp37m-linux_x86_64.whl
# Download the C++ inference library for C++ inference
wget https://paddle-inference-lib.bj.bcebos.com/2.2.0/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz
tar -xvf paddle_inference.tgz
export PADDLE_DIR=/paddle/paddle_inference
```
## 3 Python Environment Setup
If you have already set up the environment with docker, skip this section. Outside docker the configuration is more flexible; the recommended combinations are:
- CUDA10.1 + CUDNN7.6 + TensorRT 6
- CUDA10.2 + CUDNN8.1 + TensorRT 7
- CUDA11.1 + CUDNN8.1 + TensorRT 7
The walkthrough below uses the CUDA10.2 + CUDNN8.1 + TensorRT 7 combination as an example.
### 3.1 Install cuDNN
If your current environment already meets the cuDNN version requirement, you can skip this step.
Taking cuDNN 8.1 as an example, first download cuDNN 8.1 from the [Nvidia website](https://developer.nvidia.com/rdp/cudnn-archive) and fetch the three deb files matching your system version:
- cuDNN Runtime Library, e.g. libcudnn8_8.1.0.77-1+cuda10.2_amd64.deb
- cuDNN Developer Library, e.g. libcudnn8-dev_8.1.0.77-1+cuda10.2_amd64.deb
- cuDNN Code Samples, e.g. libcudnn8-samples_8.1.0.77-1+cuda10.2_amd64.deb
For deb installation, refer to the [official documentation](https://docs.nvidia.com/deeplearning/cudnn/install-guide/index.html#installlinux-deb); the steps are as follows:
```
# x.x.x is the downloaded version number
# $HOME is the working directory
sudo dpkg -i libcudnn8_x.x.x-1+cudax.x_arm64.deb
sudo dpkg -i libcudnn8-dev_8.x.x.x-1+cudax.x_arm64.deb
sudo dpkg -i libcudnn8-samples_8.x.x.x-1+cudax.x_arm64.deb
# Verify the installation
cp -r /usr/src/cudnn_samples_v8/ $HOME
cd $HOME/cudnn_samples_v8/mnistCUDNN
# Build
make clean && make
./mnistCUDNN
```
If mnistCUDNN finishes and reports success, the installation is correct. If freeimage-related errors appear, install the freeimage library as prompted:
```
sudo apt-get install libfreeimage-dev
sudo apt-get install libfreeimage
```
### 3.2 Install TensorRT
First, download TensorRT from the [Nvidia TensorRT page](https://developer.nvidia.com/tensorrt-getting-started); version 7.1.3.4 is used here. Be sure to choose the build matching your system and CUDA version, and the TAR package is recommended.
Taking Ubuntu16.04 + CUDA10.2 as an example, after downloading and extracting, follow the installation steps in the [official documentation](https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-713/install-guide/index.html#installing-tar), as below:
```
# In the commands below, '${version}' is the downloaded TensorRT version, e.g. 7.1.3.4
# Set the environment variable; <TensorRT-${version}/lib> is the lib directory of the extracted TensorRT
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<TensorRT-${version}/lib>
# Install TensorRT
cd TensorRT-${version}/python
pip3.7 install tensorrt-*-cp3x-none-linux_x86_64.whl
# Install graphsurgeon
cd TensorRT-${version}/graphsurgeon
```
### 3.3 Install PaddlePaddle
Download a Paddle wheel built with TensorRT support; note that the wheel's TensorRT version must match the local TensorRT. Download [link](https://paddleinference.paddlepaddle.org.cn/master/user_guides/download_lib.html#python)
Choose the linux-cuda10.2-trt7-gcc8.2 Python3.7 build of Paddle:
```
# As seen from the download link, this is the paddle2.1.1-cuda10.2-cudnn8.1 build
wget https://paddle-wheel.bj.bcebos.com/with-trt/2.1.1-gpu-cuda10.2-cudnn8.1-mkl-gcc8.2/paddlepaddle_gpu-2.1.1-cp37-cp37m-linux_x86_64.whl
pip3.7 install -U paddlepaddle_gpu-2.1.1-cp37-cp37m-linux_x86_64.whl
```
## 4. Install PaddleDetection Dependencies
```
# Install AutoLog
git clone https://github.com/LDOUBLEV/AutoLog
cd AutoLog
pip3.7 install -r requirements.txt
python3.7 setup.py bdist_wheel
pip3.7 install ./dist/auto_log-1.0.0-py3-none-any.whl
# Download the PaddleDetection code
cd ../
git clone https://github.com/PaddlePaddle/PaddleDetection
```
Install the PaddleDetection dependencies:
```
cd PaddleDetection
pip3.7 install -r ./requirements.txt
```
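After installing the dependencies, an optional sanity check confirms the Paddle build before running the TIPC tests (the snippet below only uses public Paddle APIs):
```
import paddle
print(paddle.__version__)                 # should match the version installed above
print(paddle.is_compiled_with_cuda())     # True for a GPU build
print(paddle.device.get_cudnn_version())  # cuDNN version as an integer
paddle.utils.run_check()                  # runs a small program on the available devices
```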
## FAQ :
Q. You are using Paddle compiled with TensorRT, but TensorRT dynamic library is not found. Ignore this if TensorRT is not needed.
A. This usually means the installed Paddle build ships with TensorRT, but the TensorRT library cannot be found locally. Download the TensorRT library, extract it, and set the LD_LIBRARY_PATH environment variable;
e.g.:
```
export LD_LIBRARY_PATH=/usr/local/python3.7.0/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/paddle/package/TensorRT-6.0.1.5/lib
```
Alternatively, the downloaded TensorRT version may not match the TRT version Paddle was compiled with; in that case download a matching TensorRT and reinstall it.
| PaddleDetection/test_tipc/docs/install.md/0 | {
"file_path": "PaddleDetection/test_tipc/docs/install.md",
"repo_id": "PaddleDetection",
"token_count": 3323
} | 81 |
#!/bin/bash
source test_tipc/utils_func.sh
FILENAME=$1
MODE="serving_infer"
# parser model_name
dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
echo "ppdet serving_cpp_infer: ${model_name}"
python=$(func_parser_value "${lines[2]}")
filename_key=$(func_parser_key "${lines[3]}")
filename_value=$(func_parser_value "${lines[3]}")
# parser export params
save_export_key=$(func_parser_key "${lines[5]}")
save_export_value=$(func_parser_value "${lines[5]}")
export_weight_key=$(func_parser_key "${lines[6]}")
export_weight_value=$(func_parser_value "${lines[6]}")
norm_export=$(func_parser_value "${lines[7]}")
pact_export=$(func_parser_value "${lines[8]}")
fpgm_export=$(func_parser_value "${lines[9]}")
distill_export=$(func_parser_value "${lines[10]}")
export_key1=$(func_parser_key "${lines[11]}")
export_value1=$(func_parser_value "${lines[11]}")
export_key2=$(func_parser_key "${lines[12]}")
export_value2=$(func_parser_value "${lines[12]}")
kl_quant_export=$(func_parser_value "${lines[13]}")
# parser serving params
infer_mode_list=$(func_parser_value "${lines[15]}")
infer_is_quant_list=$(func_parser_value "${lines[16]}")
model_key=$(func_parser_key "${lines[17]}")
op_key=$(func_parser_key "${lines[18]}")
op_value=$(func_parser_value "${lines[18]}")
port_key=$(func_parser_key "${lines[19]}")
port_value=$(func_parser_value "${lines[19]}")
gpu_ids_key=$(func_parser_key "${lines[20]}")
gpu_ids_value=$(func_parser_value "${lines[20]}")
web_service_key1=$(func_parser_key "${lines[21]}")
web_service_value1=$(func_parser_value "${lines[21]}")
http_client_py=$(func_parser_value "${lines[22]}")
serving_client_key=$(func_parser_key "${lines[23]}")
infer_image_key=$(func_parser_key "${lines[24]}")
infer_image_value=$(func_parser_value "${lines[24]}")
http_client_key1=$(func_parser_key "${lines[25]}")
http_client_value1=$(func_parser_value "${lines[25]}")
LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_serving_cpp.log"
function func_serving_inference(){
IFS='|'
_python=$1
_log_path=$2
_set_server_model_dir=$3
_set_client_model_dir=$4
_set_image_file=$5
set_op=$(func_set_params "${op_key}" "${op_value}")
set_port=$(func_set_params "${port_key}" "${port_value}")
set_web_service_params1=$(func_set_params "${web_service_key1}" "${web_service_value1}")
set_http_client_params1=$(func_set_params "${http_client_key1}" "${http_client_value1}")
# inference
for gpu_ids in ${gpu_ids_value[*]}; do
if [ ${gpu_ids} = "null" ];then
server_log_path="${_log_path}/cpp_server_cpu.log"
client_log_path="${_log_path}/cpp_client_cpu.log"
else
server_log_path="${_log_path}/cpp_server_gpu.log"
client_log_path="${_log_path}/cpp_client_gpu.log"
fi
set_gpu_ids=$(func_set_params "${gpu_ids_key}" "${gpu_ids}")
# run web service
web_service_cmd="${_python} -m paddle_serving_server.serve ${_set_server_model_dir} ${set_op} ${set_port} ${set_gpu_ids} ${set_web_service_params1} > ${server_log_path} 2>&1 &"
eval $web_service_cmd
last_status=${PIPESTATUS[0]}
cat ${server_log_path}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s
# run http client
http_client_cmd="${_python} ${http_client_py} ${_set_client_model_dir} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
eval $http_client_cmd
last_status=${PIPESTATUS[0]}
cat ${client_log_path}
status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
sleep 2s
done
}
# run serving infer
Count=0
IFS="|"
infer_quant_flag=(${infer_is_quant_list})
for infer_mode in ${infer_mode_list[*]}; do
if [ ${infer_mode} != "null" ]; then
# run export
case ${infer_mode} in
norm) run_export=${norm_export} ;;
quant) run_export=${pact_export} ;;
fpgm) run_export=${fpgm_export} ;;
distill) run_export=${distill_export} ;;
kl_quant) run_export=${kl_quant_export} ;;
*) echo "Undefined infer_mode!"; exit 1;
esac
set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
set_filename=$(func_set_params "${filename_key}" "${model_name}")
export_log_path="${LOG_PATH}/export.log"
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
echo $export_cmd
eval "${export_cmd} > ${export_log_path} 2>&1"
status_export=$?
cat ${export_log_path}
status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
fi
#run inference
set_server_model_dir=$(func_set_params "${model_key}" "${save_export_value}/${model_name}/serving_server")
set_client_model_dir=$(func_set_params "${serving_client_key}" "${save_export_value}/${model_name}/serving_client")
set_infer_image_file=$(func_set_params "${infer_image_key}" "${infer_image_value}")
is_quant=${infer_quant_flag[Count]}
func_serving_inference "${python}" "${LOG_PATH}" "${set_server_model_dir}" "${set_client_model_dir}" ${set_infer_image_file}
Count=$(($Count + 1))
done
eval "unset CUDA_VISIBLE_DEVICES"
| PaddleDetection/test_tipc/test_serving_infer_cpp.sh/0 | {
"file_path": "PaddleDetection/test_tipc/test_serving_infer_cpp.sh",
"repo_id": "PaddleDetection",
"token_count": 2461
} | 82 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)
# ignore warning log
import warnings
warnings.filterwarnings('ignore')
import paddle
from ppdet.core.workspace import load_config, merge_config
from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser
from ppdet.engine import Trainer
from ppdet.slim import build_slim_model
from ppdet.utils.logger import setup_logger
logger = setup_logger('post_quant')
def parse_args():
parser = ArgsParser()
parser.add_argument(
"--output_dir",
type=str,
default="output_inference",
help="Directory for storing the output model files.")
parser.add_argument(
"--slim_config",
default=None,
type=str,
help="Configuration file of slim method.")
args = parser.parse_args()
return args
def run(FLAGS, cfg):
# build detector
trainer = Trainer(cfg, mode='eval')
# load weights
if cfg.architecture in ['DeepSORT']:
if cfg.det_weights != 'None':
trainer.load_weights_sde(cfg.det_weights, cfg.reid_weights)
else:
trainer.load_weights_sde(None, cfg.reid_weights)
else:
trainer.load_weights(cfg.weights)
# post quant model
trainer.post_quant(FLAGS.output_dir)
def main():
FLAGS = parse_args()
cfg = load_config(FLAGS.config)
# TODO: to be refined in the future
if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn':
FLAGS.opt['norm_type'] = 'bn'
merge_config(FLAGS.opt)
if FLAGS.slim_config:
cfg = build_slim_model(cfg, FLAGS.slim_config, mode='test')
# FIXME: Temporarily solve the priority problem of FLAGS.opt
merge_config(FLAGS.opt)
check_config(cfg)
if 'use_gpu' not in cfg:
cfg.use_gpu = False
check_gpu(cfg.use_gpu)
check_version()
run(FLAGS, cfg)
if __name__ == '__main__':
main()
| PaddleDetection/tools/post_quant.py/0 | {
"file_path": "PaddleDetection/tools/post_quant.py",
"repo_id": "PaddleDetection",
"token_count": 1027
} | 83 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Black">
<option name="sdkName" value="Python 3.10" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Remote Python 3.11.0rc1 Docker (tensorflow:runtime)" project-jdk-type="Python SDK" />
</project> | euryale/.idea/misc.xml/0 | {
"file_path": "euryale/.idea/misc.xml",
"repo_id": "euryale",
"token_count": 108
} | 84 |
#!/usr/bin/env python
import argparse
import torch
from transformers import GPTJForCausalLM, GPTJConfig
# Note: these need the git version of Transformers as of 7/22/2022
from transformers import CodeGenTokenizer, CodeGenForCausalLM # noqa: F401
from transformers import CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST
parser = argparse.ArgumentParser('Convert SalesForce CodeGen model to GPT-J')
parser.add_argument('--code_model',
choices=CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST, default='Salesforce/codegen-350M-multi',
help='which SalesForce model to convert'
)
parser.add_argument('output_dir', help='where to store the converted model')
args = parser.parse_args()
print('Loading CodeGen model')
cg_model = CodeGenForCausalLM.from_pretrained(args.code_model, torch_dtype="auto")
cg_config = cg_model.config
# Create empty GPTJ model
print('Creating empty GPTJ model')
config = GPTJConfig(
vocab_size=cg_config.vocab_size,
n_positions=cg_config.n_positions,
n_embd=cg_config.n_embd,
n_layer=cg_config.n_layer,
n_head=cg_config.n_head,
rotary_dim=cg_config.rotary_dim,
n_inner=cg_config.n_inner,
activation_function=cg_config.activation_function,
resid_pdrop=cg_config.resid_pdrop,
embd_pdrop=cg_config.embd_pdrop,
attn_pdrop=cg_config.attn_pdrop,
layer_norm_epsilon=cg_config.layer_norm_epsilon,
initializer_range=cg_config.initializer_range,
scale_attn_weights=cg_config.scale_attn_weights,
use_cache=cg_config.use_cache,
bos_token_id=cg_config.bos_token_id,
eos_token_id=cg_config.eos_token_id,
torch_dtype=cg_config.torch_dtype,
)
# Fix tokenizer type
config.tokenizer_class = 'CodeGenTokenizer'
gptj_model = GPTJForCausalLM(config).half()
embed_dim = config.n_embd
def replace(model, weights, model_name):
model.state_dict()[model_name].copy_(weights.detach())
def replace_by_name(dest_model, src_model, old_name, new_name):
assert old_name in src_model.state_dict()
assert new_name in dest_model.state_dict()
replace(model=dest_model, weights=src_model.state_dict()[old_name], model_name=new_name)
print('Converting...')
# Copy weights from CodeGen model
with torch.no_grad():
cg_model.eval()
gptj_model.eval()
for name, param in cg_model.named_parameters():
# print(f'Converting {name}')
# Handle the qkv weights separately because we need to split them
if 'qkv_proj' in name:
qkv_proj = param.detach().clone()
mp_num = 4 # number of cores on their TPU I guess?
local_dim = embed_dim // mp_num
# GPT-J and CodeGen slice up the qkv projection slightly differently.
# After a great deal of pain, I figured out that this permutation on
# the weights of the qkv_proj fixes it.
base_permutation = [0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]
permutation = torch.cat([torch.arange(i * local_dim, (i + 1) * local_dim) for i in base_permutation])
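            # Added illustration (an assumption based on the HF CodeGen fused layout):
            # qkv_proj's rows form mp_num = 4 shards, each ordered [q_i, v_i, k_i]
            # with local_dim rows per piece, i.e.
            #   [q0 v0 k0 | q1 v1 k1 | q2 v2 k2 | q3 v3 k3].
            # base_permutation gathers chunks (0, 3, 6, 9), then (1, 4, 7, 10),
            # then (2, 5, 8, 11), yielding [q0..q3 | v0..v3 | k0..k3], so the
            # split(dim=0) below produces contiguous q, v, k blocks of embed_dim rows.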
# NB: we permute the *rows* here because the computation is xA.T
new_qkv_proj = qkv_proj[permutation, :]
# NB: the name QKV is misleading here; they are actually stored in
# the order QVK
query, value, key = torch.split(new_qkv_proj, embed_dim, dim=0)
replace(gptj_model, query, name.replace('qkv_proj', 'q_proj'))
replace(gptj_model, key, name.replace('qkv_proj', 'k_proj'))
replace(gptj_model, value, name.replace('qkv_proj', 'v_proj'))
else:
replace_by_name(dest_model=gptj_model, src_model=cg_model, old_name=name, new_name=name)
print('Conversion complete.')
print(f"Saving model to {args.output_dir}...")
gptj_model.save_pretrained(args.output_dir)
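# Hedged verification sketch (added comment, not in the original script): reload the
# converted checkpoint and generate a few tokens as a smoke test. The prompt text and
# token count are arbitrary choices.
#
#   from transformers import GPTJForCausalLM, CodeGenTokenizer
#   model = GPTJForCausalLM.from_pretrained(args.output_dir)
#   tok = CodeGenTokenizer.from_pretrained(args.code_model)
#   ids = tok('def hello_world():', return_tensors='pt').input_ids
#   print(tok.decode(model.generate(ids, max_new_tokens=16)[0]))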
| fauxpilot/converter/codegen_gptj_convert.py/0 | {
"file_path": "fauxpilot/converter/codegen_gptj_convert.py",
"repo_id": "fauxpilot",
"token_count": 1648
} | 85 |
#!/usr/bin/env python
import argparse
import os
from string import Template
from transformers import GPTJConfig, AutoTokenizer
import torch
def round_up(x, multiple):
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
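# Worked example (added comment): round_up pads a size to the next multiple and leaves
# exact multiples unchanged, e.g. round_up(50295, 1024) == 51200 and
# round_up(51200, 1024) == 51200. It is only referenced by the commented-out
# vocab_size rounding further down.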
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_TEMPLATE_PATH = os.path.join(SCRIPT_DIR, 'config_template.pbtxt')
# Generate a config file for a CodeGen model for use with Triton
parser = argparse.ArgumentParser('Create Triton config files for CodeGen models')
parser.add_argument('--template', default=CONFIG_TEMPLATE_PATH, help='Path to the config template')
parser.add_argument('--model_store', required=True, help='Path to the Triton model store')
parser.add_argument('--hf_model_dir', required=True, help='Path to HF model directory')
parser.add_argument('--tokenizer', default='Salesforce/codegen-16B-multi', help='Name or path to the tokenizer')
parser.add_argument('--rebase', default=None, help='Path to rebase the model store to (e.g. for Docker)')
parser.add_argument('-n', '--num_gpu', help='Number of GPUs to use', type=int, default=1)
args = parser.parse_args()
# Vars we need to fill in:
# name
# tensor_para_size
# max_seq_len
# is_half
# head_num
# size_per_head
# inter_size
# vocab_size
# start_id
# end_id
# decoder_layers
# rotary_embedding
# checkpoint_path
# Global options
if args.hf_model_dir.endswith('/'):
args.hf_model_dir = args.hf_model_dir[:-1]
config = GPTJConfig.from_pretrained(args.hf_model_dir)
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
max_seq_len = config.n_positions
is_half = '1' if config.torch_dtype == torch.float16 else '0'
# Read in the template config file
with open(args.template, 'r') as f:
template = Template(f.read())
model_name = os.path.basename(args.hf_model_dir)
version = '1'
params = {}
params['tensor_para_size'] = args.num_gpu
params['name'] = model_name
params['max_seq_len'] = max_seq_len
params['is_half'] = is_half
params['head_num'] = config.n_head
params['size_per_head'] = config.n_embd // config.n_head
params['inter_size'] = 4*config.n_embd
# Vocab size *sometimes* gets rounded up to a multiple of 1024
params['vocab_size'] = tokenizer.vocab_size+len(tokenizer.get_added_vocab()) # round_up(tokenizer.vocab_size, 1024)
params['start_id'] = tokenizer.eos_token_id
params['end_id'] = tokenizer.eos_token_id
params['decoder_layers'] = config.n_layer
params['rotary_embedding'] = config.rotary_dim
# NOTE: this assumes that the model dir follows the format used by the other conversion scripts
model_dir = os.path.join(args.model_store, f'{model_name}-{args.num_gpu}gpu')
weights_path = os.path.join(model_dir, 'fastertransformer', f'{version}', f'{args.num_gpu}-gpu')
if args.rebase:
rebased_model_dir = os.path.join(args.rebase, f'{model_name}-{args.num_gpu}gpu')
rebased_weights_path = os.path.join(args.rebase, 'fastertransformer', f'{version}', f'{args.num_gpu}-gpu')
else:
rebased_model_dir = model_dir
rebased_weights_path = weights_path
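# Note (added comment, an assumption about the deployment layout): the rebased weights
# path omits the per-model directory, presumably because the Docker setup mounts that
# model directory itself at the rebase root (e.g. /model).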
params['checkpoint_path'] = rebased_weights_path
triton_config = template.substitute(params)
assert '${' not in triton_config
# Make directory structure
os.makedirs(weights_path, exist_ok=True)
# Write config file
config_path = os.path.join(model_dir, 'fastertransformer', 'config.pbtxt')
with open(config_path, 'w') as f:
f.write(triton_config)
print('==========================================================')
print(f'Created config file for {model_name}')
print(f' Config: {config_path}')
print(f' Weights: {weights_path}')
print(f' Store: {args.model_store}')
print(f' Rebase: {model_dir} => {args.rebase}')
print(f' Weights: {rebased_weights_path}')
print(f' Num GPU: {args.num_gpu}')
print('==========================================================')
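# Hedged example invocation (added comment, not in the original file); the paths and
# model directory name are assumptions:
#
#   python triton_config_gen.py -n 1 \
#       --model_store ./models \
#       --hf_model_dir ./codegen-350M-multi-hf \
#       --rebase /model \
#       --tokenizer Salesforce/codegen-16B-multi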
| fauxpilot/converter/triton_config_gen.py/0 | {
"file_path": "fauxpilot/converter/triton_config_gen.py",
"repo_id": "fauxpilot",
"token_count": 1378
} | 86 |
import json
import random
import string
import time
import numpy as np
import tritonclient.grpc as client_util
from tokenizers import Tokenizer
from tritonclient.utils import np_to_triton_dtype, InferenceServerException
np.finfo(np.dtype("float32"))
np.finfo(np.dtype("float64"))
class CodeGenProxy:
def __init__(self, host: str = 'triton', port: int = 8001, verbose: bool = False):
self.tokenizer = Tokenizer.from_file('/python-docker/cgtok/tokenizer.json')
self.client = client_util.InferenceServerClient(url=f'{host}:{port}', verbose=verbose)
self.PAD_CHAR = 50256
# Max number of tokens the model can handle
self.MAX_MODEL_LEN = 2048
class TokensExceedsMaximum(Exception):
pass
@staticmethod
def prepare_tensor(name: str, tensor_input):
t = client_util.InferInput(
name, tensor_input.shape, np_to_triton_dtype(tensor_input.dtype))
t.set_data_from_numpy(tensor_input)
return t
@staticmethod
def trim_with_stopwords(output: str, stopwords: list) -> str:
for w in sorted(stopwords, key=len, reverse=True):
if output.endswith(w):
output = output[:-len(w)]
break
return output
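    # Worked example (added comment): trim_with_stopwords("print('hi')\n\n", ['\n\n'])
    # returns "print('hi')"; only one trailing stopword is removed, longest match first.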
@staticmethod
def to_word_list_format(word_dict, tokenizer):
flat_ids = []
offsets = []
for word_dict_item in word_dict:
item_flat_ids = []
item_offsets = []
for word in word_dict_item:
ids = tokenizer.encode(word).ids
if len(ids) == 0:
continue
item_flat_ids += ids
item_offsets.append(len(ids))
# Hack, can we do this better?
if word == '\n\n':
item_flat_ids += [198, 198]
item_offsets.append(2)
flat_ids.append(np.array(item_flat_ids))
offsets.append(np.cumsum(np.array(item_offsets)))
pad_to = max(1, max(len(ids) for ids in flat_ids))
for i, (ids, offs) in enumerate(zip(flat_ids, offsets)):
flat_ids[i] = np.pad(ids, (0, pad_to - len(ids)), constant_values=0)
offsets[i] = np.pad(offs, (0, pad_to - len(offs)), constant_values=-1)
return np.array([flat_ids, offsets], dtype="int32").transpose((1, 0, 2))
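    # Shape note (added comment): the result has shape (batch, 2, pad_to); row 0 holds the
    # flattened token ids of all stop words (0-padded), row 1 the cumulative word-end
    # offsets (-1-padded) -- the same layout as the hand-built stop/bad word fallbacks
    # in generate() below.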
def generate(self, data):
prompt = data['prompt']
n = data.get('n', 1)
model_name = data["model"]
        # Ugly hack to set the data type correctly: Hugging Face models want int32, but FasterTransformer needs uint32.
        # The uint32 -> int32 conversion could be done inside the model instead, but that would be inefficient.
np_type = np.int32 if model_name.startswith("py-") else np.uint32
input_start_ids = np.expand_dims(self.tokenizer.encode(prompt).ids, 0)
input_start_ids = np.repeat(input_start_ids, n, axis=0).astype(np_type)
prompt_len = input_start_ids.shape[1]
input_len = prompt_len * np.ones([input_start_ids.shape[0], 1]).astype(np_type)
max_tokens = data.get('max_tokens', 16)
prompt_tokens: int = input_len[0][0]
requested_tokens = max_tokens + prompt_tokens
if requested_tokens > self.MAX_MODEL_LEN:
            print(f"Rejecting request: {requested_tokens} tokens requested, model maximum is {self.MAX_MODEL_LEN}")
raise self.TokensExceedsMaximum(
f"This model's maximum context length is {self.MAX_MODEL_LEN}, however you requested "
f"{requested_tokens} tokens ({prompt_tokens} in your prompt; {max_tokens} for the completion). "
f"Please reduce your prompt; or completion length."
)
output_len = np.ones_like(input_len).astype(np_type) * max_tokens
num_logprobs = data.get('logprobs', -1)
if num_logprobs is None:
num_logprobs = -1
want_logprobs = num_logprobs > 0
temperature = data.get('temperature', 0.2)
if temperature == 0.0:
temperature = 1.0
top_k = 1
else:
top_k = data.get('top_k', 0)
top_p = data.get('top_p', 1.0)
frequency_penalty = data.get('frequency_penalty', 1.0)
runtime_top_k = top_k * np.ones([input_start_ids.shape[0], 1]).astype(np_type)
runtime_top_p = top_p * np.ones([input_start_ids.shape[0], 1]).astype(np.float32)
beam_search_diversity_rate = 0.0 * np.ones([input_start_ids.shape[0], 1]).astype(np.float32)
random_seed = np.random.randint(0, 2 ** 31 - 1, (input_start_ids.shape[0], 1), dtype=np.int32)
temperature = temperature * np.ones([input_start_ids.shape[0], 1]).astype(np.float32)
len_penalty = 1.0 * np.ones([input_start_ids.shape[0], 1]).astype(np.float32)
repetition_penalty = frequency_penalty * np.ones([input_start_ids.shape[0], 1]).astype(np.float32)
is_return_log_probs = want_logprobs * np.ones([input_start_ids.shape[0], 1]).astype(np.bool_)
beam_width = (1 * np.ones([input_start_ids.shape[0], 1])).astype(np_type)
start_ids = self.PAD_CHAR * np.ones([input_start_ids.shape[0], 1]).astype(np_type)
end_ids = self.PAD_CHAR * np.ones([input_start_ids.shape[0], 1]).astype(np_type)
stop_words = data.get('stop', [])
if stop_words is None:
stop_words = []
if stop_words:
stop_word_list = np.repeat(self.to_word_list_format([stop_words], self.tokenizer), input_start_ids.shape[0],
axis=0)
else:
stop_word_list = np.concatenate([np.zeros([input_start_ids.shape[0], 1, 1]).astype(
np.int32), (-1 * np.ones([input_start_ids.shape[0], 1, 1])).astype(np.int32)], axis=1)
# Not used
bad_words_list = np.concatenate([np.zeros([input_start_ids.shape[0], 1, 1]).astype(
np.int32), (-1 * np.ones([input_start_ids.shape[0], 1, 1])).astype(np.int32)], axis=1)
inputs = [
self.prepare_tensor("input_ids", input_start_ids),
self.prepare_tensor("input_lengths", input_len),
self.prepare_tensor("request_output_len", output_len),
self.prepare_tensor("runtime_top_k", runtime_top_k),
self.prepare_tensor("runtime_top_p", runtime_top_p),
self.prepare_tensor("beam_search_diversity_rate", beam_search_diversity_rate),
self.prepare_tensor("random_seed", random_seed),
self.prepare_tensor("temperature", temperature),
self.prepare_tensor("len_penalty", len_penalty),
self.prepare_tensor("repetition_penalty", repetition_penalty),
self.prepare_tensor("is_return_log_probs", is_return_log_probs),
self.prepare_tensor("beam_width", beam_width),
self.prepare_tensor("start_id", start_ids),
self.prepare_tensor("end_id", end_ids),
self.prepare_tensor("bad_words_list", bad_words_list),
self.prepare_tensor("stop_words_list", stop_word_list),
]
result = self.client.infer(model_name, inputs)
output_data = result.as_numpy("output_ids")
if output_data is None:
raise RuntimeError("No output data")
# All of these squeeze(1)s are to remove the beam width dimension.
output_data = output_data.squeeze(1)
if want_logprobs:
lp_data = result.as_numpy("output_log_probs").squeeze(1)
# clp_data = result.as_numpy("cum_log_probs").squeeze(1)
else:
lp_data = [None] * output_data.shape[0]
sequence_lengths = result.as_numpy("sequence_length").squeeze(1)
gen_len = sequence_lengths - input_len.squeeze(1)
decoded = self.tokenizer.decode_batch([out[prompt_len:prompt_len + g] for g, out in zip(gen_len, output_data)])
trimmed = [self.trim_with_stopwords(d, stop_words) for d in decoded]
choices = []
for i, (text, tokens, lps, g) in enumerate(zip(trimmed, output_data, lp_data, gen_len)):
reason = "length" if max_tokens == g else "stop"
if lps is not None:
tokens_str = [self.tokenizer.decode([t]) for t in tokens[prompt_len:prompt_len + g]]
offsets = [len(prompt)] + (np.cumsum([len(t) for t in tokens_str]) + len(prompt)).tolist()[:-1]
# Fake some log probs for top_logprobs
top_logprobs = []
for ii, t in enumerate(tokens_str):
fakedict = {}
top_token_lp = float(lps[ii])
fakedict[t] = top_token_lp
while len(fakedict) < num_logprobs:
random_token = random.randint(0, self.tokenizer.get_vocab_size() - 1)
random_token_str = self.tokenizer.decode([random_token])
if random_token_str in fakedict:
continue
random_token_lp = top_token_lp - random.random()
fakedict[random_token_str] = random_token_lp
top_logprobs.append(fakedict)
lpdict = {
'token_logprobs': lps.tolist(),
'top_logprobs': top_logprobs,
'tokens': tokens_str,
'text_offset': offsets,
}
else:
lpdict = None
choice = {
'text': text,
'index': i,
'finish_reason': reason,
'logprobs': lpdict,
}
choices.append(choice)
completion = {
'id': None, # fill in
'model': 'codegen',
'object': 'text_completion',
'created': int(time.time()),
'choices': None, # fill in
'usage': {
'completion_tokens': int(gen_len.sum()),
'prompt_tokens': int(prompt_len),
'total_tokens': int(gen_len.sum() + prompt_len),
}
}
return completion, choices
@staticmethod
def random_completion_id():
return 'cmpl-' + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(29))
def streamed_response(self, completion, choices):
for c in choices:
completion['id'] = self.random_completion_id()
completion['choices'] = [c]
yield f'{json.dumps(completion)}'
yield '[DONE]'
def non_streamed_response(self, completion, choices) -> str:
completion['id'] = self.random_completion_id()
completion['choices'] = choices
return json.dumps(completion)
def __call__(self, data: dict):
st = time.time()
try:
completion, choices = self.generate(data)
except InferenceServerException as exc:
# status: unavailable -- this happens if the `model` string is invalid
print(exc)
if exc.status() == 'StatusCode.UNAVAILABLE':
print(
f"WARNING: Model '{data['model']}' is not available. Please ensure that "
"`model` is set to either 'fastertransformer' or 'py-model' depending on "
"your installation"
)
completion = {}
choices = []
ed = time.time()
print(f"Returned completion in {(ed - st) * 1000} ms")
if data.get('stream', False):
return self.streamed_response(completion, choices)
else:
return self.non_streamed_response(completion, choices)
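# Hedged usage sketch (added comment, not part of the original module); the payload keys
# mirror what generate() reads above, but the host, port, and concrete values are
# assumptions:
#
#   proxy = CodeGenProxy(host='localhost', port=8001)
#   result = proxy({
#       'model': 'fastertransformer',
#       'prompt': 'def fib(n):',
#       'max_tokens': 32,
#       'temperature': 0.2,
#       'stop': ['\n\n'],
#       'stream': False,
#   })
#   print(result)  # a JSON string shaped like an OpenAI text_completion response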
| fauxpilot/copilot_proxy/utils/codegen.py/0 | {
"file_path": "fauxpilot/copilot_proxy/utils/codegen.py",
"repo_id": "fauxpilot",
"token_count": 5675
} | 87 |