text (stringlengths 3–11.2M) | id (stringlengths 15–188) | metadata (dict) | __index_level_0__ (int64, 0–275) |
---|---|---|---|
# Experimental environment: 4 * A100
# 4 * 74GB GPU memory
CUDA_VISIBLE_DEVICES=0,1,2,3 \
swift sft \
--model_type dbrx-instruct \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype bf16 \
--output_dir output \
--ddp_backend nccl \
--dataset blossom-math-zh \
--num_train_epochs 1 \
--max_length 1024 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--lora_dtype AUTO \
--gradient_checkpointing false \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--use_flash_attn true
| swift/examples/pytorch/llm/scripts/dbrx-instruct/lora_mp/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/dbrx-instruct/lora_mp/sft.sh",
"repo_id": "swift",
"token_count": 401
} | 200 |
# Experimental environment: A100
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_infer.py \
--ckpt_dir "output/internlm-20b/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.3 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false \
| swift/examples/pytorch/llm/scripts/internlm_20b/lora_ddp/infer.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/internlm_20b/lora_ddp/infer.sh",
"repo_id": "swift",
"token_count": 157
} | 201 |
# Experimental environment: A10
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python rome_infer.py \
--model_id_or_path modelscope/Llama-2-13b-chat-ms \
--model_revision master \
--template_type AUTO \
--dtype AUTO \
--max_new_tokens 128 \
--temperature 0.1 \
--top_p 0.7 \
--do_sample true \
--rome_request_file rome_example/request.json
| swift/examples/pytorch/llm/scripts/llama2_13b_chat/rome.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/llama2_13b_chat/rome.sh",
"repo_id": "swift",
"token_count": 165
} | 202 |
# Experimental environment: A100
# 30GB GPU memory
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/mengzi3-13b-base/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false \
| swift/examples/pytorch/llm/scripts/mengzi3_13b_base/lora_ddp_ds/infer.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/mengzi3_13b_base/lora_ddp_ds/infer.sh",
"repo_id": "swift",
"token_count": 136
} | 203 |
# Experimental environment: A10
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/phi3-4b-4k-instruct/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--use_flash_attn false \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
| swift/examples/pytorch/llm/scripts/phi3_4b_4k_instruct/lora/infer.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/phi3_4b_4k_instruct/lora/infer.sh",
"repo_id": "swift",
"token_count": 137
} | 204 |
# Experiment env: A10, RTX3090/4090, A100
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_infer.py \
--ckpt_dir "output/qwen1half-7b-chat-awq/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--use_flash_attn false \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--stream false \
--merge_lora false \
| swift/examples/pytorch/llm/scripts/qwen1half_7b_chat_awq/lora/infer.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/qwen1half_7b_chat_awq/lora/infer.sh",
"repo_id": "swift",
"token_count": 194
} | 205 |
# Experimental environment: A10
# 11GB GPU memory
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_sft.py \
--model_id_or_path qwen/Qwen-VL-Chat-Int4 \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype fp16 \
--output_dir output \
--dataset coco-en-mini \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 2048 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules c_attn attn.c_proj \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--use_flash_attn false \
--push_to_hub false \
--hub_model_id qwen-vl-chat-int4-qlora \
--hub_private_repo true \
--hub_token 'your-sdk-token' \
| swift/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora/sft.sh",
"repo_id": "swift",
"token_count": 481
} | 206 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from swift.aigc import infer_controlnet_sdxl
if __name__ == '__main__':
infer_controlnet_sdxl()
| swift/examples/pytorch/sdxl/infer_controlnet_sdxl.py/0 | {
"file_path": "swift/examples/pytorch/sdxl/infer_controlnet_sdxl.py",
"repo_id": "swift",
"token_count": 56
} | 207 |
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python infer_text_to_image_sdxl.py \
--pretrained_model_name_or_path "AI-ModelScope/stable-diffusion-xl-base-1.0" \
--unet_model_path "train_text_to_image_sdxl/checkpoint-10000/unet" \
--prompt "A pokemon with green eyes and red legs." \
--image_save_path "sdxl_pokemon.png" \
--torch_dtype "fp16" \
| swift/examples/pytorch/sdxl/scripts/run_infer_text_to_image_sdxl.sh/0 | {
"file_path": "swift/examples/pytorch/sdxl/scripts/run_infer_text_to_image_sdxl.sh",
"repo_id": "swift",
"token_count": 162
} | 208 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from swift.aigc import train_text_to_image_lora
if __name__ == '__main__':
train_text_to_image_lora()
| swift/examples/pytorch/sdxl/train_text_to_image_lora.py/0 | {
"file_path": "swift/examples/pytorch/sdxl/train_text_to_image_lora.py",
"repo_id": "swift",
"token_count": 60
} | 209 |
{
"cmd": "sft",
"requirements":{
"gpu": "1",
"ddp": "1"
},
"eval_requirements": {
"gpu": "1"
},
"eval_dataset": ["ceval", "gsm8k", "arc"],
"args": {
"model_type": "qwen1half-7b-chat-int8",
"dataset": "ms-agent",
"train_dataset_mix_ratio": 2.0,
"batch_size": 1,
"max_length": 2048,
"use_loss_scale": true,
"gradient_accumulation_steps": 16,
"learning_rate": 5e-5,
"use_flash_attn": true,
"eval_steps": 2000,
"save_steps": 2000,
"train_dataset_sample": -1,
"val_dataset_sample": 5000,
"num_train_epochs": 2,
"gradient_checkpointing": true,
"weight_decay": 0.01,
"warmup_ratio": 0.03,
"save_total_limit": 2,
"logging_steps": 10,
"sft_type": "lora",
"lora_target_modules": "ALL",
"lora_rank": 8,
"lora_alpha": 32
},
"experiment": [
{
"name": "qwen1half-7b-chat-int8"
}
]
}
| swift/scripts/benchmark/config/gptq.json/0 | {
"file_path": "swift/scripts/benchmark/config/gptq.json",
"repo_id": "swift",
"token_count": 544
} | 210 |
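The benchmark config above packs `swift sft` options into its "args" block. As a rough illustration (this is not the actual benchmark runner, and a local copy of the file named gptq.json is assumed), the block can be flattened into the equivalent command line:
import json

# Illustrative sketch only: flatten the "args" block of gptq.json into `swift sft` flags.
with open('gptq.json') as f:  # assumed local copy of the config shown above
    cfg = json.load(f)

cmd = ['swift', cfg['cmd']]
for key, value in cfg['args'].items():
    if isinstance(value, bool):
        value = str(value).lower()  # true/false, matching the shell scripts in this dump
    cmd += [f'--{key}', str(value)]
print(' '.join(cmd))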
# Copyright (c) Alibaba, Inc. and its affiliates.
from typing import TYPE_CHECKING
from .utils.import_utils import _LazyModule
if TYPE_CHECKING:
from .version import __version__, __release_datetime__
from .tuners import (Adapter, AdapterConfig, AdapterModule, SwiftModel, LoRA, LoRAConfig, SWIFT_MAPPING,
AdaLoraConfig, IA3Config, LoftQConfig, LoHaConfig, LoKrConfig, LoraConfig, OFTConfig,
PeftConfig, PeftModel, PeftModelForCausalLM, ResTuningConfig, SideConfig,
PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification,
PrefixTuningConfig, PromptEncoderConfig, PromptLearningConfig, PromptTuningConfig,
get_peft_config, get_peft_model, get_peft_model_state_dict, Prompt, PromptConfig, PromptModule,
SwiftConfig, SwiftOutput, Swift, SwiftTuners, LongLoRAConfig, LongLoRA, LongLoRAModelType,
SCETuning, SCETuningConfig)
from .hub import snapshot_download, push_to_hub, push_to_hub_async, push_to_hub_in_queue
from .trainers import (EvaluationStrategy, FSDPOption, HPSearchBackend, HubStrategy, IntervalStrategy,
SchedulerType, ShardedDDPOption, TrainingArguments, Seq2SeqTrainingArguments, Trainer,
Seq2SeqTrainer)
from .utils import get_logger
else:
_import_structure = {
'version': ['__release_datetime__', '__version__'],
'hub': ['snapshot_download', 'push_to_hub', 'push_to_hub_async', 'push_to_hub_in_queue'],
'tuners': [
'Adapter', 'AdapterConfig', 'AdapterModule', 'SwiftModel', 'LoRA', 'LoRAConfig', 'SWIFT_MAPPING',
'LoraConfig', 'AdaLoraConfig', 'IA3Config', 'LoftQConfig', 'LoHaConfig', 'LoKrConfig', 'OFTConfig',
'PeftConfig', 'ResTuningConfig', 'SideConfig', 'PeftModel', 'PeftModelForCausalLM', 'PeftModelForSeq2SeqLM',
'PeftModelForSequenceClassification', 'PeftModelForTokenClassification', 'PrefixTuningConfig',
'PromptEncoderConfig', 'PromptLearningConfig', 'PromptTuningConfig', 'get_peft_config', 'get_peft_model',
'get_peft_model_state_dict', 'Prompt', 'PromptConfig', 'PromptModule', 'SwiftConfig', 'SwiftOutput',
'Swift', 'SwiftTuners', 'LongLoRAConfig', 'LongLoRA', 'LongLoRAModelType', 'SCETuning', 'SCETuningConfig'
],
'trainers': [
'EvaluationStrategy', 'FSDPOption', 'HPSearchBackend', 'HubStrategy', 'IntervalStrategy', 'SchedulerType',
'ShardedDDPOption', 'TrainingArguments', 'Seq2SeqTrainingArguments', 'Trainer', 'Seq2SeqTrainer'
],
'utils': ['get_logger']
}
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
module_spec=__spec__,
extra_objects={},
)
| swift/swift/__init__.py/0 | {
"file_path": "swift/swift/__init__.py",
"repo_id": "swift",
"token_count": 1306
} | 211 |
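A short usage sketch of the lazy-import machinery above (the target module names 'q_proj'/'v_proj' are illustrative): `import swift` stays cheap, and the exported names are resolved through `_LazyModule` on first access.
import swift

print(swift.__version__)               # resolves the lazy 'version' module
from swift import Swift, LoraConfig    # resolves the lazy 'tuners' module

# Illustrative config; the right target modules depend on the model architecture.
config = LoraConfig(r=8, lora_alpha=32, target_modules=['q_proj', 'v_proj'])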
# Copyright (c) Alibaba, Inc. and its affiliates.
from swift.llm import app_ui_main
if __name__ == '__main__':
app_ui_main()
| swift/swift/cli/app_ui.py/0 | {
"file_path": "swift/swift/cli/app_ui.py",
"repo_id": "swift",
"token_count": 48
} | 212 |
{
"response":{
"Name:": [1.0, 3.0],
"Action:": [1.0, 3.0],
"ACTION:": [1.0,3.0],
"Tool:": [1.0, 3.0],
"Command": [1.0, 3.0],
"Arguments:": [1.0, 3.0],
"action input": [1.0, 3.0],
"ACTION_INPUT:":[1.0, 3.0],
"Action Input:": [1.0, 3.0],
"Thought:": [1.0, 1.0],
"Final Answer:": [1.0, 1.0],
"Observation:": [2.0, 0.0]
},
"query":{
"What is the tool you want to use": [3.0],
"What are the required parameter names": [3.0],
"What is the value of": [3.0],
"What are the required parameter names for this tool": [3.0]
}
}
| swift/swift/llm/agent/agentflan.json/0 | {
"file_path": "swift/swift/llm/agent/agentflan.json",
"repo_id": "swift",
"token_count": 376
} | 213 |
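This JSON is the kind of file that llm_sft (later in this dump) loads via --loss_scale_config_path and hands to the template as loss_scale_map. A minimal lookup sketch, assuming a local copy named agentflan.json; how the two weights per field are actually applied is decided by the template and is not shown here.
import json

with open('agentflan.json') as f:  # assumed local copy of the map shown above
    loss_scale_map = json.load(f)

def response_field_weights(text: str):
    # Return the configured [weight, weight] pair for the first matching field prefix.
    for key, weights in loss_scale_map['response'].items():
        if text.startswith(key):
            return weights
    return None

print(response_field_weights('Action: web_search'))  # [1.0, 3.0]
print(response_field_weights('Observation: ...'))    # [2.0, 0.0]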
# Copyright (c) Alibaba, Inc. and its affiliates.
import os
from functools import partial
from typing import Any, Dict, Union
import json
import torch
from modelscope import BitsAndBytesConfig, GenerationConfig
from transformers import IntervalStrategy
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.utils import is_torch_npu_available
from swift.torchacc_utils import patch_acc_model
from swift.trainers import Seq2SeqTrainer
from swift.trainers.utils import can_return_loss, find_labels
from swift.utils import (append_to_jsonl, check_json_format, compute_acc_metrics, compute_nlg_metrics, get_dist_setting,
get_logger, get_main, get_model_info, is_ddp_plus_mp, is_dist, is_local_master, is_master,
plot_images, preprocess_logits_for_metrics, seed_everything, show_layers, use_torchacc)
from .accelerator import ta_accelerate
from .tuner import prepare_model
from .utils import (TEMPLATE_MAPPING, LazyLLMDataset, SftArguments, Template, dataset_map, get_dataset,
get_model_tokenizer, get_template, get_time_info, print_example, set_generation_config,
sort_by_max_length, stat_dataset)
logger = get_logger()
def llm_sft(args: SftArguments) -> Dict[str, Union[str, Any]]:
logger.info(f'args: {args}')
seed_everything(args.seed)
training_args = args.training_args
if is_torch_npu_available():
print(f'device_count: {torch.npu.device_count()}')
else:
print(f'device_count: {torch.cuda.device_count()}')
rank, local_rank, world_size, local_world_size = get_dist_setting()
print(f'rank: {rank}, local_rank: {local_rank}, ' f'world_size: {world_size}, local_world_size: {local_world_size}')
if args.gpu_memory_fraction is not None:
for device_id in range(torch.cuda.device_count()):
torch.cuda.set_per_process_memory_fraction(max(min(args.gpu_memory_fraction, 1.0), 0.01), device=device_id)
# Loading Model and Tokenizer
if is_deepspeed_zero3_enabled():
model_kwargs = {'device_map': None}
elif is_torch_npu_available():
model_kwargs = {'device_map': local_rank if local_rank >= 0 else 0}
elif args.device_map_config_path is not None:
cwd = os.getcwd()
config_path = args.device_map_config_path if os.path.isabs(args.device_map_config_path) else os.path.join(
cwd, args.device_map_config_path)
with open(config_path, 'r') as json_file:
model_kwargs = {'device_map': json.load(json_file)}
else:
model_kwargs = {'low_cpu_mem_usage': True}
if is_dist() and not is_ddp_plus_mp():
model_kwargs['device_map'] = {'': local_rank}
elif torch.cuda.device_count() == 1:
model_kwargs['device_map'] = 'cuda:0'
elif not use_torchacc():
model_kwargs['device_map'] = 'auto'
if args.quant_method == 'hqq':
from transformers import HqqConfig
if args.hqq_dynamic_config_path is not None:
cwd = os.getcwd()
config_path = args.hqq_dynamic_config_path if os.path.isabs(args.hqq_dynamic_config_path) else os.path.join(
cwd, args.hqq_dynamic_config_path)
with open(config_path, 'r') as json_file:
quantization_config = HqqConfig(dynamic_config=json.load(json_file))
else:
if args.quantization_bit == 0:
logger.info("You haven't set the quantization_bit parameter; set it to 8.")
args.quantization_bit = 8
quantization_config = HqqConfig(nbits=args.quantization_bit, axis=args.hqq_axis)
logger.info(f'quantization_config: {quantization_config.__dict__}')
model_kwargs['quantization_config'] = quantization_config
elif args.quant_method == 'eetq':
from transformers import EetqConfig
quantization_config = EetqConfig('int8')
logger.info(f'quantization_config: {quantization_config.__dict__}')
model_kwargs['quantization_config'] = quantization_config
elif args.load_in_8bit or args.load_in_4bit: # bnb
quantization_config = BitsAndBytesConfig(
args.load_in_8bit,
args.load_in_4bit,
bnb_4bit_compute_dtype=args.bnb_4bit_compute_dtype,
bnb_4bit_quant_type=args.bnb_4bit_quant_type,
bnb_4bit_use_double_quant=args.bnb_4bit_use_double_quant)
logger.info(f'quantization_config: {quantization_config.__dict__}')
model_kwargs['quantization_config'] = quantization_config
kwargs = {
'max_length': args.max_length,
'use_unsloth': args.tuner_backend == 'unsloth',
'load_in_4bit': args.quantization_bit == 4
}
if args.use_flash_attn is not None:
kwargs['use_flash_attn'] = args.use_flash_attn
if args.local_repo_path:
kwargs['local_repo_path'] = args.local_repo_path
if args.quant_method == 'awq':
kwargs['is_awq'] = True
elif args.quant_method == 'aqlm':
kwargs['is_aqlm'] = True
elif args.quant_method == 'gptq':
kwargs['is_gptq'] = True
if args.rope_scaling:
kwargs['rope_scaling'] = args.rope_scaling
kwargs['max_length'] = args.max_length
model, tokenizer = get_model_tokenizer(
args.model_type,
args.torch_dtype,
model_kwargs,
model_id_or_path=args.model_id_or_path,
revision=args.model_revision,
is_training=True,
**kwargs)
logger.info(f'model_config: {model.config}')
generation_config = GenerationConfig(
max_new_tokens=args.max_new_tokens,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
do_sample=args.do_sample,
repetition_penalty=args.repetition_penalty,
num_beams=args.num_beams,
pad_token_id=tokenizer.pad_token_id,
eos_token_id=tokenizer.eos_token_id)
logger.info(f'generation_config: {generation_config}')
set_generation_config(model, generation_config)
training_args.generation_config = generation_config
if use_torchacc():
import torchacc as ta
# Get `label_names` and `return_loss` before 'ta_accelerate' because it will
# wrap the model and make these properties incorrect.
label_names = find_labels(model)
return_loss = can_return_loss(model)
model = patch_acc_model(model, args)
# Preparing LoRA
model, callbacks = prepare_model(model, args)
show_layers(model)
logger.info(model)
model_info = None
if not is_deepspeed_zero3_enabled():
model_info = get_model_info(model)
logger.info(model_info)
if args.gradient_checkpointing:
model.config.use_cache = False # fix transformers==4.36
logger.info('Setting model.config.use_cache: False')
model.enable_input_require_grads()
if use_torchacc():
model.config.use_cache = False
logger.info('Setting model.config.use_cache: False')
model = ta_accelerate(
model,
args.fsdp_num,
args.model_layer_cls_name,
args.bf16,
args.fp16,
gradient_checkpointing=True,
fsdp_flatten_parameters=False)
# Loading Dataset
train_dataset, val_dataset = get_dataset(
args.dataset,
args.dataset_test_ratio,
args.dataset_seed,
check_dataset_strategy=args.check_dataset_strategy,
model_name=args.model_name,
model_author=args.model_author)
if len(args.val_dataset) > 0:
# Loading val dataset
_, val_dataset = get_dataset(
args.val_dataset,
1.0,
args.dataset_seed,
check_dataset_strategy=args.check_dataset_strategy,
model_name=args.model_name,
model_author=args.model_author)
train_dataset, val_dataset = args._handle_dataset_compat(train_dataset, val_dataset)
training_args.train_dataset_sample = train_dataset.shape[0] if train_dataset is not None else 0
logger.info(f'train_dataset: {train_dataset}')
logger.info(f'val_dataset: {val_dataset}')
template_kwargs = {}
template_info = TEMPLATE_MAPPING[args.template_type]
use_model = template_info.get('use_model', False)
if use_model:
template_kwargs['model'] = model
template_kwargs['use_loss_scale'] = args.use_loss_scale
if args.loss_scale_config_path is not None:
cwd = os.getcwd()
config_path = args.loss_scale_config_path if os.path.isabs(args.loss_scale_config_path) else os.path.join(
cwd, args.loss_scale_config_path)
with open(config_path, 'r') as json_file:
template_kwargs['loss_scale_map'] = json.load(json_file)
template_kwargs['tools_prompt'] = args.tools_prompt
if args.sequence_parallel_size and args.sequence_parallel_size > 1:
template_kwargs['sequence_parallel_size'] = args.sequence_parallel_size
template: Template = get_template(args.template_type, tokenizer, args.system, args.max_length,
args.truncation_strategy, **template_kwargs)
args.system = template.default_system
logger.info(f'system: {args.system}')
logger.info(f'args.lazy_tokenize: {args.lazy_tokenize}')
if args.packing:
from swift.llm.utils.utils import ConstantLengthDataset
train_dataset = ConstantLengthDataset.get_packed_dataset(
template, train_dataset, args.max_length, lazy_tokenize=args.lazy_tokenize)
if val_dataset is not None:
val_dataset = ConstantLengthDataset.get_packed_dataset(
template, val_dataset, args.max_length, lazy_tokenize=args.lazy_tokenize)
dataset_info = {}
if not args.lazy_tokenize:
td0 = train_dataset[0]
print_example(td0, tokenizer, {})
dataset_info['train_dataset'] = stat_dataset(train_dataset)
if val_dataset is not None:
dataset_info['val_dataset'] = stat_dataset(val_dataset)
elif not args.lazy_tokenize:
dataset_info = {}
logger.info(f'Using num_proc: {args.preprocess_num_proc}')
train_dataset = dataset_map(train_dataset, template.encode, args.preprocess_num_proc)
if val_dataset is not None:
val_dataset = dataset_map(val_dataset, template.encode, args.preprocess_num_proc)
if args.test_oom_error:
train_dataset = sort_by_max_length(train_dataset, 20000)
# Data analysis
if train_dataset is None:
logger.error('Error accessing train_dataset properties. '
'Please ensure that the dataset is properly initialized '
'and that every sample of the train_dataset is non-empty.')
raise AttributeError('Failed to access dataset attributes: train_dataset is None. This might be because:\n'
'(1) The dataset contains None for input or labels;\n'
"(2) The 'max_length' setting is too short, causing all the data to be truncated.")
td0, tkwargs0 = train_dataset.data[0]
print_example(td0, tokenizer, tkwargs0)
dataset_info['train_dataset'] = stat_dataset(train_dataset)
if val_dataset is not None:
dataset_info['val_dataset'] = stat_dataset(val_dataset)
else:
dataset_info = None
td0, tkwargs0 = template.encode(train_dataset[0])
print_example(td0, tokenizer, tkwargs0)
train_dataset = LazyLLMDataset(train_dataset, template)
if val_dataset is not None:
val_dataset = LazyLLMDataset(val_dataset, template)
if val_dataset is None:
training_args.evaluation_strategy = IntervalStrategy.NO
training_args.eval_strategy = IntervalStrategy.NO
training_args.do_eval = False
padding_to = args.max_length if args.sft_type == 'longlora' else None
data_collator = partial(template.data_collator, padding_to=padding_to)
train_batch_size = args.batch_size
eval_batch_size = args.eval_batch_size
if use_torchacc():
train_batch_size *= world_size
eval_batch_size *= world_size
training_args.per_device_train_batch_size = train_batch_size
training_args.per_device_eval_batch_size = eval_batch_size
training_args.group_by_length = use_torchacc()
# Trainer
logger.info(f'training_args: {training_args}')
trainer_kwargs = {}
if args.predict_with_generate:
trainer_kwargs['compute_metrics'] = partial(compute_nlg_metrics, tokenizer=tokenizer)
else:
compute_metrics = partial(compute_acc_metrics, acc_strategy=args.acc_strategy)
trainer_kwargs['compute_metrics'] = compute_metrics
trainer_kwargs['preprocess_logits_for_metrics'] = preprocess_logits_for_metrics
if args.check_model_is_latest is False:
trainer_kwargs['check_model'] = False
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=val_dataset,
tokenizer=tokenizer,
callbacks=callbacks,
sequence_parallel_size=args.sequence_parallel_size,
**trainer_kwargs)
trainer.sft_args = args
if use_torchacc():
trainer.label_names = label_names
trainer.can_return_loss = return_loss
if is_master():
for args_obj, fname in zip([args, training_args], ['sft_args.json', 'training_args.json']):
fpath = os.path.join(args.output_dir, fname)
logger.info(f'The {args_obj.__class__.__name__} will be saved in: {fpath}')
args_dict = args_obj.__dict__
args_dict.pop('hub_token', None)
args_dict.pop('push_to_hub_token', None)
with open(fpath, 'w', encoding='utf-8') as f:
json.dump(check_json_format(args_dict), f, ensure_ascii=False, indent=2)
logging_path = os.path.join(args.output_dir, 'logging.jsonl')
logger.info(f'The logging file will be saved in: {logging_path}')
trainer.train(training_args.resume_from_checkpoint)
last_model_checkpoint = getattr(trainer.state, 'last_model_checkpoint', None)
logger.info(f'last_model_checkpoint: {last_model_checkpoint}')
logger.info(f'best_model_checkpoint: {trainer.state.best_model_checkpoint}')
train_time = get_time_info(trainer.state.log_history, len(train_dataset))
# Visualization
if is_master() and not use_torchacc():
if 'tensorboard' in args.training_args.report_to:
images_dir = os.path.join(args.output_dir, 'images')
logger.info(f'images_dir: {images_dir}')
plot_images(images_dir, args.logging_dir, ['train/loss'], 0.9)
if args.push_to_hub:
trainer._add_patterns_to_gitignore(['images/'])
trainer.push_to_hub()
run_info = {
'memory': trainer.perf['memory'],
'train_time': train_time,
'last_model_checkpoint': last_model_checkpoint,
'best_model_checkpoint': trainer.state.best_model_checkpoint,
'best_metric': trainer.state.best_metric,
'global_step': trainer.state.global_step,
'log_history': trainer.state.log_history,
'model_info': model_info,
'dataset_info': dataset_info,
}
for key in ['gen_time', 'gen_len']:
if trainer.perf[key] != 0:
run_info[key] = trainer.perf[key]
if is_local_master():
jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
append_to_jsonl(jsonl_path, run_info)
return run_info
def get_sft_main(args, llm):
if use_torchacc():
logger.warning('TorchAcc is currently only available internally ' 'within Alibaba Cloud.')
import torchacc as ta
# This patch should be called before `llm_sft`.
ta.accelerate_hf_trainer()
return get_main(args, llm)
sft_main = get_sft_main(SftArguments, llm_sft)
| swift/swift/llm/sft.py/0 | {
"file_path": "swift/swift/llm/sft.py",
"repo_id": "swift",
"token_count": 7183
} | 214 |
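For reference, a minimal programmatic sketch of driving the training entry point above (model and dataset values are illustrative; the shell scripts in this dump pass the same options as CLI flags, and the swift.llm exports are assumed to match what the CLI uses):
from swift.llm import SftArguments, sft_main  # assumed public exports, as used by `swift sft`

result = sft_main(
    SftArguments(
        model_type='qwen1half-7b-chat',   # illustrative model_type
        dataset=['blossom-math-zh'],      # illustrative dataset
        sft_type='lora',
        lora_target_modules=['ALL'],
        max_length=1024,
        num_train_epochs=1,
    ))
print(result['best_model_checkpoint'])    # part of the run_info returned by llm_sft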
import os
import shutil
from typing import Any, Dict, List, Literal, Optional, Union
import numpy as np
from swift.hub.utils.utils import get_cache_dir
class MediaTag:
task_prompts = {
'ref_grounding': {
'en': [('<ref-object>', '<bbox>'), ('The positions of <ref-object> is', '<bbox>'),
('Find the positions of <ref-object>', '<bbox>'), ('Where is <ref-object>', '<bbox>'),
('Find <ref-object>', '<bbox>'), ('Show me <ref-object>', '<bbox>'),
('Provide the bounding box coordinate of <ref-object>', '<bbox>')],
'zh': [('<ref-object>', '<bbox>'), ('<ref-object>的位置在图片中', '<bbox>'), ('<ref-object>在图片中', '<bbox>'),
('<ref-object>在', '<bbox>'), ('找到<ref-object>的位置', '<bbox>'), ('<ref-object>在哪里', '<bbox>'),
('提供<ref-object>的坐标位置', '<bbox>')]
},
'grounding_caption': {
'en': [
('<bbox>', '<ref-object>'),
('The object at position <bbox>', '<ref-object>'),
('This <bbox> is', '<ref-object>'),
('What is the thing at <bbox>', '<ref-object>'),
('Describe <bbox>', '<ref-object>'),
('<bbox> is', '<ref-object>'),
('The bounding box coordinate <bbox> contains', '<ref-object>'),
],
'zh': [
('<bbox>', '<ref-object>'),
('<bbox>是什么', '<ref-object>'),
('<bbox>的位置包含', '<ref-object>'),
('描述<bbox>', '<ref-object>'),
('<bbox>中是', '<ref-object>'),
('坐标<bbox>描述了什么', '<ref-object>'),
('描述<bbox>中的事物', '<ref-object>'),
]
},
}
standard_tags = {
'image': '<image>',
'audio': '<audio_label>',
'video': '<video_label>',
}
media_keys = {
'audio': 'audios',
'image': 'images',
'video': 'videos',
}
def __init__(self,
media_type: Optional[Literal['image', 'audio', 'video']],
media_tag=None,
task_type: Literal['caption_with_grounding', 'ref_grounding', 'grounding_caption', 'ocr',
'vqa'] = 'vqa'):
self.media_type = media_type
self.task_type = task_type
self.media_tag = media_tag or '<unused_tag>'
def __call__(self, d: Dict[str, Any], medias: Union[tuple, list], objects: List = None) -> None:
"""Format the query/response/history with medias
Args:
d: A dict containing history/query/response
medias: A list of medias (one round, multiple medias),
a single media (one round, one media), or a tuple of media lists (multiple rounds)
objects: A list of object-bbox pairs (one round), or a tuple of object-bbox lists (multiple rounds)
"""
if not self.media_type:
return
media_cnt = len(medias) if isinstance(medias, (tuple, list)) else 1 if medias else 0
history = d.get('history') or []
query = d.get('query')
response = d.get('response')
if self.task_type == 'caption_with_grounding':
pass
elif self.task_type in ('ref_grounding', 'grounding_caption'):
lang = np.random.choice(['en', 'zh'], p=[0.8, 0.2])
prompts = self.task_prompts[self.task_type][lang]
query, response = prompts[np.random.choice(len(prompts))]
elif self.task_type == 'ocr':
raise NotImplementedError
else:
pass
standard_tag = self.standard_tags[self.media_type]
all_queries = ''.join([h[0] for h in history]) + query
if self.media_tag in all_queries:
assert all_queries.count(self.media_tag) == media_cnt
for h in history:
h[0] = h[0].replace(self.media_tag, standard_tag)
query = query.replace(self.media_tag, standard_tag)
if 'history' in d:
d['history'] = history
d['query'] = query
if 'response' in d:
d['response'] = response
class MediaCache:
cache_dir = os.path.join(get_cache_dir(), 'media_resources')
media_type_urls = {
'llava', 'coco', 'sam', 'gqa', 'ocr_vqa', 'textvqa', 'VG_100K', 'VG_100K_2', 'share_textvqa', 'web-celebrity',
'web-landmark', 'wikiart'
}
URL_PREFIX = 'https://www.modelscope.cn/api/v1/datasets/hjh0119/sharegpt4v-images/repo?Revision=master&FilePath='
@staticmethod
def get_url(media_type):
is_ocr_vqa = (media_type == 'ocr_vqa')
extension = 'tar' if is_ocr_vqa else 'zip'
return f'{MediaCache.URL_PREFIX}{media_type}.{extension}'
@staticmethod
def download(media_type, media_name=None):
from swift.utils import safe_ddp_context
with safe_ddp_context():
return MediaCache._safe_download(media_type=media_type, media_name=media_name)
@staticmethod
def _safe_download(media_type, media_name=None):
media_name = media_name or media_type
if media_type in MediaCache.media_type_urls:
media_type = MediaCache.get_url(media_type)
from datasets.download.download_manager import DownloadManager, DownloadConfig
final_folder = os.path.join(MediaCache.cache_dir, media_name)
if os.path.exists(final_folder):
return final_folder
local_dirs = DownloadManager(download_config=DownloadConfig(
cache_dir=MediaCache.cache_dir)).download_and_extract(media_type)
shutil.move(str(local_dirs), final_folder)
return final_folder
@staticmethod
def safe_save(image, file_name, folder, format='JPEG'):
folder = os.path.join(MediaCache.cache_dir, folder)
os.makedirs(folder, exist_ok=True)
file = os.path.join(folder, file_name)
if os.path.exists(file):
return file
image.save(file, format=format)
return file
| swift/swift/llm/utils/media.py/0 | {
"file_path": "swift/swift/llm/utils/media.py",
"repo_id": "swift",
"token_count": 2989
} | 215 |
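A small usage sketch for the MediaTag class above (the tag string and file name are made up): the custom media_tag in the raw query is checked against the number of medias and rewritten to the standard '<image>' tag.
from swift.llm.utils.media import MediaTag

tagger = MediaTag(media_type='image', media_tag='<img-placeholder>', task_type='vqa')
sample = {'query': 'Describe <img-placeholder> in one sentence.',
          'response': 'A dog on a beach.',
          'history': []}
tagger(sample, medias=['dog.jpg'])        # one round, one media
print(sample['query'])                    # Describe <image> in one sentence.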
# Copyright (c) Alibaba, Inc. and its affiliates.
import os
from dataclasses import dataclass, field
from typing import List, Optional
import torch
from transformers.training_args import TrainingArguments as HfTrainingArguments
from transformers.training_args_seq2seq import Seq2SeqTrainingArguments as HfSeq2SeqTrainingArguments
from transformers.utils import is_accelerate_available
from swift.utils import is_dist, use_torchacc
@dataclass
class SwiftArgumentsMixin:
# ckpt only save model
save_only_model: bool = False
train_sampler_random: bool = True
push_hub_strategy: str = field(
default='push_best', metadata={'choices': {'end', 'push_best', 'push_last', 'checkpoint', 'all_checkpoints'}})
acc_strategy: str = field(default='token', metadata={'choices': ['token', 'sentence']})
additional_saved_files: Optional[List[str]] = None
metric_warmup_step: Optional[float] = 0
train_dataset_sample: Optional[int] = -1
def __post_init__(self):
if is_dist() and self.ddp_backend == 'nccl' and torch.cuda.is_available() and is_accelerate_available():
try:
from accelerate.utils import check_cuda_p2p_ib_support
if not check_cuda_p2p_ib_support():
os.environ['NCCL_P2P_DISABLE'] = '1'
os.environ['NCCL_IB_DISABLE'] = '1'
except ImportError:
pass
if self.additional_saved_files is None:
self.additional_saved_files = []
super().__post_init__()
@dataclass
class TrainingArguments(SwiftArgumentsMixin, HfTrainingArguments):
pass
@dataclass
class Seq2SeqTrainingArguments(SwiftArgumentsMixin, HfSeq2SeqTrainingArguments):
@property
def place_model_on_device(self):
return False if use_torchacc() else super().place_model_on_device
| swift/swift/trainers/arguments.py/0 | {
"file_path": "swift/swift/trainers/arguments.py",
"repo_id": "swift",
"token_count": 740
} | 216 |
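A minimal sketch of constructing the extended training arguments above (output_dir and the field values are illustrative): the mixin fields sit alongside the usual Hugging Face options.
from swift.trainers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir='output',            # required by the HF base class
    save_only_model=True,
    acc_strategy='sentence',
    push_hub_strategy='push_best',
)
print(args.place_model_on_device)   # False only when TorchAcc is in use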
from typing import Any, Dict, Union
import torch
from torch import nn
from transformers import PreTrainedModel, trainer
from trl import ORPOTrainer as HFORPOTrainer
from swift.llm.utils.template import Template
from swift.llm.utils.utils import sort_by_max_length
from swift.utils import get_logger
from .callback import DefaultFlowCallbackNew, PrinterCallbackNew, ProgressCallbackNew
from .mixin import PushToMsHubMixin, SwiftMixin
from .utils import build_tokenized_answer, concat_template
logger = get_logger()
class ORPOTrainer(PushToMsHubMixin, SwiftMixin, HFORPOTrainer):
def __init__(self, *args, template: Template, test_oom_error=False, **kwargs):
self.template = template
super().__init__(*args, **kwargs)
train_ds_info = self.stat_dataset(self.train_dataset)
val_ds_info = self.stat_dataset(self.eval_dataset)
self.dataset_info = {'train_dataset': train_ds_info, 'val_dataset': val_ds_info}
if test_oom_error:
self.train_dataset = sort_by_max_length(self.train_dataset, 20000)
# performance
self.perf: Dict[str, Any] = {
'gen_time': 0.,
'gen_len': 0,
'memory': {},
'model': self.model.get_trainable_parameters() if hasattr(self.model, 'get_trainable_parameters') else None,
}
def train(self, *args, **kwargs) -> torch.Tensor:
res = super().train(*args, **kwargs)
for i in range(torch.cuda.device_count()):
self.perf['memory'][f'cuda:{i}'] = f'{torch.cuda.max_memory_reserved(i)/1024/1024/1024:.2f}GiB'
return res
def tokenize_row(self, feature, model: Union[PreTrainedModel, nn.Module] = None) -> Dict:
batch = {}
if not self.is_encoder_decoder:
prompt, chosen, rejected, loss_scale = concat_template(feature, self.template)
prompt_tokens, _, _, _ = self.template._encode_context_list(prompt, loss_scale)
prompt_tokens = {
'input_ids': prompt_tokens,
'attention_mask': [1] * len(prompt_tokens),
}
prompt_tokens = {f'prompt_{k}': v for k, v in prompt_tokens.items()}
if not isinstance(chosen, str):
raise ValueError(f'chosen should be a str but got {type(chosen)}')
chosen_tokens = build_tokenized_answer(chosen, self.template)
# Avoid tokenizing the prompt repeatedly.
chosen_tokens.update(prompt_tokens)
if not isinstance(rejected, str):
raise ValueError(f'rejected should be a str but got {type(rejected)}')
rejected_tokens = build_tokenized_answer(rejected, self.template)
rejected_tokens.update(prompt_tokens)
longer_response_length = max(len(chosen_tokens['input_ids']), len(rejected_tokens['input_ids']))
# if combined sequence is too long, truncate the prompt
for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
if len(answer_tokens['prompt_input_ids']) + longer_response_length > self.max_length:
if self.truncation_mode == 'keep_start':
for k in ['prompt_input_ids', 'prompt_attention_mask']:
answer_tokens[k] = answer_tokens[k][:self.max_prompt_length]
elif self.truncation_mode == 'keep_end':
for k in ['prompt_input_ids', 'prompt_attention_mask']:
answer_tokens[k] = answer_tokens[k][-self.max_prompt_length:]
else:
raise ValueError(f'Unknown truncation mode: {self.truncation_mode}')
# if that's still too long, truncate the response
for answer_tokens in [chosen_tokens, rejected_tokens]:
if len(answer_tokens['prompt_input_ids']) + longer_response_length > self.max_length:
for k in ['input_ids', 'attention_mask']:
answer_tokens[k] = answer_tokens[k][:self.max_length - self.max_prompt_length]
# Create labels
chosen_sequence_tokens = {
k: chosen_tokens[f'prompt_{k}'] + chosen_tokens[k]
for k in ['input_ids', 'attention_mask']
}
rejected_sequence_tokens = {
k: rejected_tokens[f'prompt_{k}'] + rejected_tokens[k]
for k in ['input_ids', 'attention_mask']
}
chosen_sequence_tokens['labels'] = chosen_sequence_tokens['input_ids'][:]
_paddings = [self.label_pad_token_id] * len(chosen_tokens['prompt_input_ids'])
chosen_sequence_tokens['labels'][:len(chosen_tokens['prompt_input_ids'])] = _paddings
rejected_sequence_tokens['labels'] = rejected_sequence_tokens['input_ids'][:]
_paddings = [self.label_pad_token_id] * len(rejected_tokens['prompt_input_ids'])
rejected_sequence_tokens['labels'][:len(rejected_tokens['prompt_input_ids'])] = _paddings
for k, toks in {
'chosen_': chosen_sequence_tokens,
'rejected_': rejected_sequence_tokens,
'': prompt_tokens,
}.items():
for type_key, tokens in toks.items():
if type_key == 'token_type_ids':
continue
batch[f'{k}{type_key}'] = tokens
else:
# encoder-decoder
batch = super().tokenize_row(feature, model)
return batch
@staticmethod
def stat_dataset(llm_dataset) -> Any:
_token_len = []
from datasets import Dataset as HfDataset
from swift.utils.np_utils import stat_array
if isinstance(llm_dataset, HfDataset):
chosen = llm_dataset['chosen_input_ids']
rejected = llm_dataset['rejected_input_ids']
for cc, rr in zip(chosen, rejected):
_token_len.append(max(len(cc), len(rr)))
else:
for d in llm_dataset:
_token_len.append(max(len(d['chosen_input_ids']), len(d['rejected_input_ids'])))
_, stat_str = stat_array(_token_len)
logger.info(f'Dataset Token Length: {stat_str}')
return stat_str
trainer.DEFAULT_PROGRESS_CALLBACK = ProgressCallbackNew
trainer.DEFAULT_CALLBACKS = [DefaultFlowCallbackNew]
trainer.PrinterCallback = PrinterCallbackNew
| swift/swift/trainers/orpo_trainer.py/0 | {
"file_path": "swift/swift/trainers/orpo_trainer.py",
"repo_id": "swift",
"token_count": 3106
} | 217 |
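A toy walk-through of the truncation rule in tokenize_row above (all lengths are made up): the prompt is clipped to max_prompt_length first, and only if the combined length still exceeds max_length are the responses clipped to the remaining budget.
max_length, max_prompt_length = 12, 8
prompt = list(range(10))                        # 10 prompt token ids
chosen, rejected = list(range(5)), list(range(3))
longer = max(len(chosen), len(rejected))

if len(prompt) + longer > max_length:           # 10 + 5 > 12
    prompt = prompt[:max_prompt_length]         # truncation_mode == 'keep_start'
if len(prompt) + longer > max_length:           # 8 + 5 > 12, still too long
    chosen = chosen[:max_length - max_prompt_length]
    rejected = rejected[:max_length - max_prompt_length]

print(len(prompt), len(chosen), len(rejected))  # 8 4 3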
# Copyright (c) Alibaba, Inc. and its affiliates.
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
import importlib
import math
import re
import warnings
from itertools import chain
from typing import Any, Dict, List, Optional
import importlib_metadata
import packaging
import peft
import torch
import torch.nn as nn
import torch.nn.functional as F
from packaging import version
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.lora import Conv2d as _Conv2d
from peft.tuners.lora import Embedding as _Embedding
from peft.tuners.lora import Linear as _Linear
from peft.tuners.lora import LoraLayer
from peft.tuners.lora import LoraModel as _LoraModel
from peft.tuners.lora.tp_layer import LoraParallelLinear as _LoraParallelLinear
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import _get_submodules, get_auto_gptq_quant_linear, get_quantization_config
from peft.utils.other import transpose
from transformers import Conv1D
from swift import LoraConfig, get_logger
from .utils import ActivationMixin, ModulesToSaveWrapper, SwiftAdapter
logger = get_logger()
dispatchers = []
def is_auto_awq_available():
return importlib.util.find_spec('awq') is not None
def is_aqlm_available():
return importlib.util.find_spec('aqlm') is not None
def is_eetq_available():
return importlib.util.find_spec('eetq') is not None
def is_hqq_available():
return importlib.util.find_spec('hqq') is not None
def is_auto_gptq_available():
try:
return peft.import_utils._is_auto_gptq_available()
except ImportError as e:
logger.warn(e)
return False
peft.import_utils._is_auto_gptq_available = peft.import_utils.is_auto_gptq_available
peft.import_utils.is_auto_gptq_available = is_auto_gptq_available
class LoRAActivationMixin(ActivationMixin):
@property
def active_adapters(self):
return self.get_activated_adapters()
@property
def active_adapter(self) -> str:
return self.get_activated_adapters()
def set_adapter(self, adapter_names, offload=None):
if isinstance(adapter_names, str):
adapter_names = [adapter_names]
# Deactivate grads on the inactive adapter and activate grads on the active adapter
for layer_name in self.adapter_layer_names:
module_dict = getattr(self, layer_name)
for key, layer in module_dict.items():
if key in adapter_names:
self.set_activation(key, True)
layer.requires_grad_(True)
SwiftAdapter.save_memory(layer, key, self.module_key, True)
else:
self.set_activation(key, False)
layer.requires_grad_(False)
SwiftAdapter.save_memory(layer, key, self.module_key, False, offload=offload)
def save_memory(self, adapter_name, activate, offload=None):
for layer_name in self.adapter_layer_names:
module_dict = getattr(self, layer_name)
for key, layer in module_dict.items():
if key == adapter_name:
if activate:
SwiftAdapter.save_memory(layer, layer_name + '.' + key, self.module_key, True)
else:
SwiftAdapter.save_memory(layer, layer_name + '.' + key, self.module_key, False, offload=offload)
def merge(self, *args, **kwargs):
if not self.unique_thread:
raise AssertionError('Merging is not supported when multiple threads are in use; '
'please set the environment variable `USE_UNIQUE_THREAD=1` to merge LoRA.')
return super().merge(*args, **kwargs)
if is_bnb_available():
import bitsandbytes as bnb
from peft.tuners.lora.bnb import Linear8bitLt as _Linear8bitLt
class Linear8bitLt(LoRAActivationMixin, _Linear8bitLt):
def __init__(
self,
*args,
module_key: str,
**kwargs,
):
super(Linear8bitLt, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, module_key: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_8bit = kwargs.get('loaded_in_8bit', False)
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
eightbit_kwargs = kwargs.copy()
eightbit_kwargs.update({
'has_fp16_weights': target.state.has_fp16_weights,
'memory_efficient_backward': target.state.memory_efficient_backward,
'threshold': target.state.threshold,
'index': target.index,
})
new_module = Linear8bitLt(target, adapter_name, module_key=module_key, **eightbit_kwargs)
return new_module
dispatchers.append(dispatch_bnb_8bit)
if is_bnb_4bit_available():
from peft.tuners.lora.bnb import Linear4bit as _Linear4bit
class Linear4bit(LoRAActivationMixin, _Linear4bit):
def __init__(
self,
*args,
module_key: str,
**kwargs,
):
super(Linear4bit, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, module_key: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_4bit = kwargs.get('loaded_in_4bit', False)
if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update({
'compute_dtype': target_base_layer.compute_dtype,
'compress_statistics': target_base_layer.weight.compress_statistics,
'quant_type': target_base_layer.weight.quant_type,
})
new_module = Linear4bit(target, adapter_name, module_key=module_key, **fourbit_kwargs)
return new_module
dispatchers.append(dispatch_bnb_4bit)
if is_aqlm_available():
from peft.tuners.lora.aqlm import AqlmLoraLinear as _AqlmLoraLinear
from aqlm import QuantizedLinear
class AqlmLoraLinear(LoRAActivationMixin, _AqlmLoraLinear):
def __init__(
self,
*args,
module_key: str,
**kwargs,
):
super(AqlmLoraLinear, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
def dispatch_aqlm(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear):
new_module = AqlmLoraLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.codes
return new_module
dispatchers.append(dispatch_aqlm)
if is_auto_awq_available():
from peft.tuners.lora.awq import AwqLoraLinear as _AwqLoraLinear
from awq.modules.linear import WQLinear_GEMM
class AwqLoraLinear(LoRAActivationMixin, _AwqLoraLinear):
def __init__(
self,
*args,
module_key: str,
**kwargs,
):
super(AwqLoraLinear, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
def dispatch_awq(
target: torch.nn.Module,
adapter_name: str,
module_key: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, WQLinear_GEMM):
# Raise the error only at the dispatch level
AUTOAWQ_MINIMUM_VERSION = packaging.version.parse('0.2.0')
version_autoawq = packaging.version.parse(importlib_metadata.version('autoawq'))
if AUTOAWQ_MINIMUM_VERSION > version_autoawq:
raise ImportError(f'Found an incompatible version of auto-awq. Found version {version_autoawq}, '
f'but only versions above {AUTOAWQ_MINIMUM_VERSION} are supported for PEFT.')
new_module = AwqLoraLinear(target, adapter_name, module_key=module_key, **kwargs)
target.qweight = target_base_layer.qweight
return new_module
dispatchers.append(dispatch_awq)
if is_auto_gptq_available():
from peft.tuners.lora import QuantLinear as _QuantLinear
class QuantLinear(LoRAActivationMixin, _QuantLinear):
def __init__(
self,
base_layer,
adapter_name: str,
module_key: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
use_rslora: bool = False,
use_dora: bool = False,
use_qa_lora=False,
group_size=None,
**kwargs,
):
super(QuantLinear, self).__init__(module_key)
self.set_activation(adapter_name, True)
nn.Module.__init__(self)
self.group_size = group_size
self.use_qa_lora = use_qa_lora
if self.use_qa_lora:
assert self.group_size is not None, 'To use qa_lora you need to pass in the `group_size` param.'
self.qa_pool = torch.nn.AvgPool1d(self.group_size) # average pooling (rescaled by group_size in forward) performs the grouped sum
LoraLayer.__init__(self, base_layer)
if use_dora:
raise ValueError(f'{_QuantLinear.__name__} does not support DoRA yet, please set it to False')
if self.use_qa_lora:
self.in_features = self.in_features // self.group_size
# self.base_layer and self.quant_linear_module are the same;
# we need the former for consistency and the latter
# for backwards compatibility
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
)
def forward(self, x: torch.Tensor):
# note: logic differs from default Linear because merging is not supported
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = x.to(lora_A.weight.dtype)
if self.use_qa_lora:
x = self.qa_pool(x) * self.group_size
output = lora_B(lora_A(dropout(x)))
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling
result += output
return result
def dispatch_gptq(
target: torch.nn.Module,
adapter_name: str,
module_key: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
gptq_quantization_config = kwargs.get('gptq_quantization_config', None)
AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
if AutoGPTQQuantLinear is not None and isinstance(target_base_layer, AutoGPTQQuantLinear):
new_module = QuantLinear(target, adapter_name, module_key=module_key, **kwargs)
target.qweight = target_base_layer.qweight
return new_module
dispatchers.append(dispatch_gptq)
if is_eetq_available():
from peft.tuners.lora.eetq import EetqLoraLinear as _EetqLoraLinear
from eetq import EetqLinear
class EetqLoraLinear(LoRAActivationMixin, _EetqLoraLinear):
def __init__(
self,
*args,
module_key: str,
**kwargs,
):
super(EetqLoraLinear, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
def dispatch_eetq(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_eetq_available() and isinstance(target_base_layer, EetqLinear):
new_module = EetqLoraLinear(target, adapter_name, **kwargs)
target.weight = target_base_layer.weight
if hasattr(target, 'bias'):
target.bias = target_base_layer.bias
return new_module
dispatchers.append(dispatch_eetq)
if is_hqq_available():
from peft.tuners.lora.hqq import HqqLoraLinear as _HqqLoraLinear
from hqq.core.quantize import HQQLinear
class HqqLoraLinear(LoRAActivationMixin, _HqqLoraLinear):
def __init__(
self,
*args,
module_key: str,
**kwargs,
):
super(HqqLoraLinear, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
def dispatch_hqq(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_hqq_available() and isinstance(target_base_layer, HQQLinear):
new_module = HqqLoraLinear(target_base_layer, adapter_name, **kwargs)
return new_module
dispatchers.append(dispatch_hqq)
def dispatch_megatron(
target: torch.nn.Module,
adapter_name: str,
lora_config,
module_key,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if lora_config.megatron_config:
megatron_core = importlib.import_module(lora_config.megatron_core)
else:
megatron_core = None
if megatron_core and isinstance(
target_base_layer,
(megatron_core.tensor_parallel.ColumnParallelLinear, megatron_core.tensor_parallel.RowParallelLinear)): # noqa
megatron_kwargs = kwargs.copy()
megatron_config = lora_config.megatron_config
if isinstance(megatron_config, dict):
transformer_config_class = megatron_core.transformer.transformer_config.TransformerConfig
megatron_config = transformer_config_class(**lora_config.megatron_config)
megatron_kwargs['megatron_config'] = megatron_config
if megatron_kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `ColumnParallelLinear` '
'or `RowParallelLinear`. '
'Setting fan_in_fan_out to False.')
megatron_kwargs['fan_in_fan_out'] = lora_config.fan_in_fan_out = False
new_module = LoraParallelLinear(
base_layer=target,
adapter_name=adapter_name,
module_key=module_key,
backend=megatron_core.tensor_parallel,
**megatron_kwargs)
return new_module
def dispatch_default(
target: torch.nn.Module,
adapter_name: str,
lora_config: LoraConfig,
module_key: str,
**kwargs,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Embedding):
embedding_kwargs = kwargs.copy()
embedding_kwargs.pop('fan_in_fan_out', None)
embedding_kwargs.update(lora_config.loftq_config)
new_module = Embedding(target, adapter_name, module_key=module_key, **embedding_kwargs)
elif isinstance(target_base_layer, torch.nn.Conv2d):
kwargs.update(lora_config.loftq_config)
new_module = Conv2d(target, adapter_name, module_key=module_key, **kwargs)
elif isinstance(target_base_layer, torch.nn.Linear):
if target_base_layer.__class__.__name__ == 'NonDynamicallyQuantizableLinear':
# Fix issue: https://github.com/modelscope/swift/issues/342
return None
if kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. '
'Setting fan_in_fan_out to False.')
kwargs['fan_in_fan_out'] = lora_config.fan_in_fan_out = False
kwargs.update(lora_config.loftq_config)
new_module = Linear(target, adapter_name, module_key=module_key, **kwargs)
elif isinstance(target_base_layer, Conv1D):
if not kwargs['fan_in_fan_out']:
warnings.warn('fan_in_fan_out is set to False but the target module is `Conv1D`. '
'Setting fan_in_fan_out to True.')
kwargs['fan_in_fan_out'] = lora_config.fan_in_fan_out = True
kwargs.update(lora_config.loftq_config)
new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, module_key=module_key, **kwargs)
return new_module
dispatchers.append(dispatch_megatron)
dispatchers.append(dispatch_default)
class Embedding(LoRAActivationMixin, _Embedding):
def __init__(
self,
*args,
module_key: str,
**kwargs,
) -> None:
super(Embedding, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
class Linear(LoRAActivationMixin, _Linear):
def __init__(self, *args, module_key: str, **kwargs):
super(Linear, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
def device_hook(module, args):
for active_adapter in self.active_adapters:
if active_adapter in self.lora_A:
self.lora_A[active_adapter].to(args[0].device)
self.lora_B[active_adapter].to(args[0].device)
self.register_forward_pre_hook(device_hook)
def update_layer(self,
adapter_name,
r,
lora_alpha,
lora_dropout,
init_lora_weights,
use_rslora,
use_dora: bool = False):
# This code works for linear layers, override for other layer types
if r <= 0:
raise ValueError(f'`r` should be a positive integer value but the value passed is {r}')
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
# Actual trainable parameters
self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False)
self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=False)
if use_rslora:
self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
else:
self.scaling[adapter_name] = lora_alpha / r
if isinstance(init_lora_weights, str) and init_lora_weights.startswith('pissa'):
self.pissa_init(adapter_name, init_lora_weights)
elif init_lora_weights == 'loftq':
self.loftq_init(adapter_name)
elif init_lora_weights:
self.reset_lora_parameters(adapter_name, init_lora_weights)
# check weight and qweight (for GPTQ)
for weight_name in ('weight', 'qweight'):
weight = getattr(self.get_base_layer(), weight_name, None)
if weight is not None:
if weight.device != torch.device('meta'):
# the layer is already completely initialized, this is an update
if weight.dtype.is_floating_point or weight.dtype.is_complex:
self.to(weight.device, dtype=weight.dtype)
else:
self.to(weight.device)
break
elif weight.dtype.is_floating_point or weight.dtype.is_complex:
self.to(dtype=weight.dtype)
break
if use_dora:
self.dora_init(adapter_name)
self.use_dora[adapter_name] = True
else:
self.use_dora[adapter_name] = False
self.set_adapter(self.active_adapters)
class Conv2d(LoRAActivationMixin, _Conv2d):
def __init__(self, *args, module_key: str, **kwargs):
super(Conv2d, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
class LoraParallelLinear(LoRAActivationMixin, _LoraParallelLinear):
def __init__(self, *args, module_key: str, **kwargs):
super(LoraParallelLinear, self).__init__(module_key)
self.set_activation(args[1], True)
super(ActivationMixin, self).__init__(*args, **kwargs)
class LoraModel(_LoraModel):
prefix: str = 'lora_'
def __init__(self, model, config, adapter_name):
if config is not None:
super().__init__(model, config, adapter_name)
else:
nn.Module.__init__(self)
self.model = model
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for active_adapter in self.active_adapters:
bias = self.peft_config[active_adapter].bias
if bias == 'none':
continue
if bias == 'all':
for n, p in model.named_parameters():
if 'bias' in n:
p.requires_grad = True
elif bias == 'lora_only':
for m in model.modules():
if isinstance(m, LoraLayer) and hasattr(m, 'bias') and m.bias is not None:
m.bias.requires_grad = True
else:
raise NotImplementedError(f'Requested bias: {bias}, is not implemented.')
def inject_adapter(self, model: nn.Module, adapter_name: str):
r"""
Override code:
1. ModulesToSaveWrapper construction: pass module_key=key so the module can be offloaded to CPU
"""
peft_config = self.peft_config[adapter_name]
# Note: If possible, all checks should be performed *at the start of this method*.
# This way, we can raise early if something goes wrong, without leaving the model
# in a bad (half-initialized) state.
self._check_new_adapter_config(peft_config)
is_target_modules_in_base_model = False
key_list = [key for key, _ in model.named_modules()]
_check_for_modules_to_save = getattr(peft_config, 'modules_to_save', None) is not None
_has_modules_to_save = False
model_config = getattr(model, 'config', {'model_type': 'custom'})
if hasattr(model_config, 'to_dict'):
model_config = model_config.to_dict()
peft_config = self._prepare_adapter_config(peft_config, model_config)
from peft.tuners.tuners_utils import _maybe_include_all_linear_layers
# update peft_config.target_modules if required
peft_config = _maybe_include_all_linear_layers(peft_config, model)
self._prepare_model(peft_config, model)
for key in key_list:
# Check for modules_to_save in case
if _check_for_modules_to_save and any(
key.endswith(f'{module_to_save}') for module_to_save in peft_config.modules_to_save):
# Optionally set the modules to save
parent, target, target_name = _get_submodules(model, key)
if not isinstance(target, ModulesToSaveWrapper):
new_module = ModulesToSaveWrapper(target, adapter_name=adapter_name, module_key=key)
setattr(parent, target_name, new_module)
else:
target.update(adapter_name)
_has_modules_to_save = True
continue
if not self._check_target_module_exists(peft_config, key):
continue
self.targeted_module_names.append(key)
is_target_modules_in_base_model = True
parent, target, target_name = _get_submodules(model, key)
self._create_and_replace(peft_config, adapter_name, target, target_name, parent, current_key=key)
if not is_target_modules_in_base_model:
raise ValueError(f'Target modules {peft_config.target_modules} not found in the base model. '
f'Please check the target modules and try again.')
self._mark_only_adapters_as_trainable(self.model)
if self.peft_config[adapter_name].inference_mode:
for n, p in self.model.named_parameters():
if adapter_name in n:
p.requires_grad = False
if _has_modules_to_save:
if not hasattr(model, 'modules_to_save'):
model.modules_to_save = set(peft_config.modules_to_save)
else:
model.modules_to_save.update(set(peft_config.modules_to_save))
def _convert_dtype(self, target: nn.Module, lora_dtype: str):
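        # Map the lora_dtype string ('fp32'/'fp16'/'bf16') to a torch dtype and cast only the LoRA A/B (and embedding) weights; any other value leaves them unchanged.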
if lora_dtype == 'fp32':
torch_dtype = torch.float32
elif lora_dtype == 'fp16':
torch_dtype = torch.float16
elif lora_dtype == 'bf16':
torch_dtype = torch.bfloat16
else:
torch_dtype = None
if torch_dtype is not None:
if hasattr(target, 'lora_A'):
target.lora_A.to(torch_dtype)
target.lora_B.to(torch_dtype)
if hasattr(target, 'lora_embedding_A'):
target.lora_embedding_A.to(torch_dtype)
target.lora_embedding_B.to(torch_dtype)
def _create_and_replace(
self,
lora_config,
adapter_name,
target,
target_name,
parent,
current_key,
**optional_kwargs,
):
"""
Override code:
1. Import bnb from upper code
2. Support dtype converting
3. Support skipping NonDynamicallyQuantizableLinear
4. Add current_key argument to _create_new_module
5. Use Class type defined here
6. Allow new_module being None
"""
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
# Regexp matching - Find key which matches current target_name in patterns provided
pattern_keys = list(chain(lora_config.rank_pattern.keys(), lora_config.alpha_pattern.keys()))
target_name_key = next(filter(lambda key: re.match(rf'.*\.{key}$', current_key), pattern_keys), current_key)
r = lora_config.rank_pattern.get(target_name_key, lora_config.r)
alpha = lora_config.alpha_pattern.get(target_name_key, lora_config.lora_alpha)
kwargs = {
'r': r,
'lora_alpha': alpha,
'lora_dropout': lora_config.lora_dropout,
'fan_in_fan_out': lora_config.fan_in_fan_out,
'init_lora_weights': lora_config.init_lora_weights,
'use_rslora': lora_config.use_rslora,
'use_dora': lora_config.use_dora,
'loaded_in_8bit': getattr(self.model, 'is_loaded_in_8bit', False),
'loaded_in_4bit': getattr(self.model, 'is_loaded_in_4bit', False),
}
quant_methods = ['gptq', 'aqlm', 'awq']
for quant_method in quant_methods:
quantization_config = get_quantization_config(self.model, method=quant_method)
if quantization_config is not None:
kwargs[f'{quant_method}_quantization_config'] = quantization_config
# note: AdaLoraLayer is a subclass of LoraLayer, we need to exclude it
from peft.tuners.adalora import AdaLoraLayer
if isinstance(target, LoraLayer) and not isinstance(target, AdaLoraLayer):
if target.__class__.__name__ == 'NonDynamicallyQuantizableLinear':
# Fix issue: https://github.com/modelscope/swift/issues/342
return
target.update_layer(
adapter_name,
r,
lora_alpha=alpha,
lora_dropout=lora_config.lora_dropout,
init_lora_weights=lora_config.init_lora_weights,
use_rslora=lora_config.use_rslora,
use_dora=lora_config.use_dora,
)
self._convert_dtype(target, lora_config.lora_dtype)
else:
new_module = self._create_new_module(lora_config, adapter_name, target, current_key=current_key, **kwargs)
if new_module is not None:
if adapter_name != self.active_adapter:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
self._convert_dtype(new_module, lora_config.lora_dtype)
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
# It's not necessary to set requires_grad here, as that is handled by
# _mark_only_adapters_as_trainable
# child layer wraps the original module, unpack it
if hasattr(child, 'base_layer'):
child = child.base_layer
if not hasattr(new_module, 'base_layer'):
new_module.weight = child.weight
if hasattr(child, 'bias'):
new_module.bias = child.bias
if getattr(child, 'state', None) is not None:
if hasattr(new_module, 'base_layer'):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
# dispatch to correct device
for name, module in new_module.named_modules():
if (self.prefix in name) or ('ranknum' in name):
weight = (
child.qweight
if hasattr(child, 'qweight') else child.W_q if hasattr(child, 'W_q') else child.weight)
module.to(weight.device)
@staticmethod
def _create_new_module(lora_config, adapter_name, target, **kwargs):
"""
Override code:
1. Support current_key argument
2. Support MergedLinear
3. Support skipping NonDynamicallyQuantizableLinear(Move to dispatcher)
4. Use Class type defined here(Move to dispatcher)
5. return None instead of raising error when target type not found
"""
# Collect dispatcher functions to decide what backend to use for the replaced LoRA layer. The order matters,
# because the first match is always used. Therefore, the default layers should be checked last.
current_key = kwargs.pop('current_key')
new_module = None
if lora_config.use_qa_lora:
kwargs['use_qa_lora'] = True
kwargs['group_size'] = lora_config.group_size
if lora_config.use_merged_linear:
bias = kwargs.pop('bias', False)
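            # enable_lora marks which slices of a fused projection (e.g. the q/k/v parts of a packed attention Linear) receive LoRA updates.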
new_module = MergedLinear(
adapter_name, current_key, target, bias=bias, enable_lora=lora_config.enable_lora, **kwargs)
else:
for dispatcher in dispatchers:
new_module = dispatcher(target, adapter_name, lora_config=lora_config, module_key=current_key, **kwargs)
if new_module is not None: # first match wins
break
if new_module is None:
# no module could be matched
logger.debug(
f'Target module {target} is not supported. Currently, only the following modules are supported: '
'`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv2d`, `transformers.pytorch_utils.Conv1D`.')
new_module = None
return new_module
class LoRALayer(ActivationMixin):
def __init__(
self,
adapter_name: str,
module_key: str,
r: int,
lora_alpha: int,
lora_dropout: float,
merge_weights: bool,
):
super().__init__(module_key)
self.adapter_name = adapter_name
self.r = r
self.lora_alpha = lora_alpha
# Optional dropout
if lora_dropout > 0.:
self.lora_dropout = nn.Dropout(p=lora_dropout)
else:
self.lora_dropout = lambda x: x
# Mark the weight as unmerged
self.merged = False
self.merge_weights = merge_weights
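        # Merging rewrites the shared base weight in place, so it is disabled when this module may be used from more than one thread.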
if not self._unique_thread:
self.merge_weights = False
class MergedLinear(nn.Linear, LoRALayer):
# LoRA implemented in a dense layer
def __init__(self,
adapter_name: str,
module_key: str,
base_layer: nn.Linear,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.,
enable_lora: List[bool] = [False],
fan_in_fan_out: bool = False,
merge_weights: bool = True,
bias: bool = True,
device=None,
dtype=None,
**kwargs):
nn.Linear.__init__(self, base_layer.in_features, base_layer.out_features, bias=bias, device=device, dtype=dtype)
LoRALayer.__init__(
self,
adapter_name,
module_key,
r=r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
merge_weights=merge_weights)
assert base_layer.out_features % len(enable_lora) == 0, \
'The length of enable_lora must divide out_features'
self.enable_lora = enable_lora
self.fan_in_fan_out = fan_in_fan_out
self.base_layer = base_layer
# Actual trainable parameters
if r > 0 and any(enable_lora):
self.lora_A = nn.Parameter(self.weight.new_zeros((r * sum(enable_lora), base_layer.in_features)))
self.lora_B = nn.Parameter(
self.weight.new_zeros((base_layer.out_features // len(enable_lora) * sum(enable_lora),
r))) # weights for Conv1D with groups=sum(enable_lora)
self.scaling = self.lora_alpha / self.r
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
# Compute the indices
self.lora_ind = self.weight.new_zeros((base_layer.out_features, ),
dtype=torch.bool).view(len(enable_lora), -1)
self.lora_ind[enable_lora, :] = True
self.lora_ind = self.lora_ind.view(-1)
self.reset_parameters()
self.weight = self.base_layer.weight
if getattr(self.base_layer, 'bias', None) is not None:
self.bias = self.base_layer.bias
if fan_in_fan_out:
self.weight.data = self.weight.data.transpose(0, 1)
def reset_parameters(self):
nn.Linear.reset_parameters(self)
if hasattr(self, 'lora_A'):
# initialize A the same way as the default for nn.Linear and B to zero
nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
nn.init.zeros_(self.lora_B)
def zero_pad(self, x):
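        # Scatter the per-group LoRA deltas back to the full out_features dimension; rows belonging to disabled groups stay zero.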
result = x.new_zeros((len(self.lora_ind), *x.shape[1:]))
result[self.lora_ind] = x
return result
def merge_AB(self):
def T(w):
return w.transpose(0, 1) if self.fan_in_fan_out else w
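        # A grouped 1x1 conv computes B @ A separately for each enabled group, giving the combined delta weight in a single call.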
delta_w = F.conv1d(self.lora_A.unsqueeze(0), self.lora_B.unsqueeze(-1), groups=sum(self.enable_lora)).squeeze(0)
return T(self.zero_pad(delta_w))
    def merge(self, **kwargs):
        if self.merge_weights and not self.merged:
            # Merge the weights and mark it
            if self.r > 0 and any(self.enable_lora):
                self.weight.data += self.merge_AB() * self.scaling
            self.merged = True
def unmerge(self, **kwargs):
if self.merge_weights and self.merged:
# Make sure that the weights are not merged
if self.r > 0 and any(self.enable_lora):
self.weight.data -= self.merge_AB() * self.scaling
self.merged = False
def forward(self, x: torch.Tensor, **kwargs):
def T(w):
return w.transpose(0, 1) if self.fan_in_fan_out else w
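        # If the deltas are already merged into the base weight, or this adapter is deactivated, use the plain projection; otherwise add the LoRA contribution on the fly.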
if self.merged or not self.is_activated(self.adapter_name):
return F.linear(x, T(self.weight), bias=self.bias)
else:
result = F.linear(x, T(self.weight), bias=self.bias)
if self.r > 0:
x_dtype = x.dtype
x = x.to(self.lora_A.dtype)
result += self.lora_dropout(x) @ T(self.merge_AB().T) * self.scaling
result = result.to(x_dtype)
return result
def mark_lora_as_trainable(model: nn.Module, adapter_name: str, bias: str = 'none') -> None:
if bias == 'none':
return
elif bias == 'all':
for n, p in model.named_parameters():
if 'bias' in n:
p.requires_grad = True
elif bias == 'lora_only':
for n, m in model.named_modules():
if 'lora_' in n and f'.{adapter_name}' in n and \
hasattr(m, 'bias') and \
m.bias is not None:
m.bias.requires_grad = True
else:
raise NotImplementedError
def lora_state_dict(state_dict, adapter_name: str, bias: str = 'none') -> Dict[str, torch.Tensor]:
if bias == 'none':
to_return = {k: state_dict[k] for k in state_dict if 'lora_' in k}
elif bias == 'all':
to_return = {k: state_dict[k] for k in state_dict if 'lora_' in k or 'bias' in k}
elif bias == 'lora_only':
to_return = {}
for k in state_dict:
if 'lora_' in k:
to_return[k] = state_dict[k]
bias_name = k.split('lora_')[0] + 'bias'
if bias_name in state_dict:
to_return[bias_name] = state_dict[bias_name]
else:
raise NotImplementedError
return {k: v for k, v in to_return.items() if (('lora_' in k and f'.{adapter_name}' in k) or ('bias' in k))}
| swift/swift/tuners/lora_layers.py/0 | {
"file_path": "swift/swift/tuners/lora_layers.py",
"repo_id": "swift",
"token_count": 19432
} | 218 |
# Copyright (c) Alibaba, Inc. and its affiliates.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from swift.utils.logger import get_logger
logger = get_logger()
def detach_tensors(feats):
if type(feats) in [list, tuple]:
feats = [detach_tensors(feat) if feat is not None else None for feat in feats]
elif isinstance(feats, dict):
feats = {key: detach_tensors(val) for key, val in feats.items()}
elif isinstance(feats, torch.Tensor):
feats = feats.detach()
else:
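        # Fallback: assume any remaining object is tensor-like and supports detach().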
feats = feats.detach()
return feats
def probe_tensors(module, feats, name):
feats = detach_tensors(feats)
setattr(module, name, feats)
def probe_input_pre_hook(self, args):
input = args[0]
probe_tensors(self, input, 'probe_input_data')
return args
def probe_output_hook(self, args, result):
output = result
probe_tensors(self, output, 'probe_output_data')
return output
def choose_weight_type(weight_type, dim):
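    # 'gate': learned token-wise gate, 'scale': single learned scalar, 'scale_channel': learned per-channel scale, 'scalar_<v>': fixed constant v, otherwise no re-weighting.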
if weight_type == 'gate':
scaling = nn.Linear(dim, 1)
elif weight_type == 'scale':
scaling = nn.Parameter(torch.Tensor(1))
scaling.data.fill_(1)
elif weight_type == 'scale_channel':
scaling = nn.Parameter(torch.Tensor(dim))
scaling.data.fill_(1)
elif weight_type and weight_type.startswith('scalar'):
scaling = float(weight_type.split('_')[-1])
else:
scaling = None
return scaling
def get_weight_value(weight_type, scaling, x):
if weight_type in ['gate']:
scaling = torch.mean(torch.sigmoid(scaling(x)), dim=1).view(-1, 1, 1)
elif weight_type in ['scale', 'scale_channel'] or weight_type.startswith('scalar'):
scaling = scaling
else:
scaling = None
return scaling
class SCEAdapter(nn.Module):
def __init__(self,
dim,
adapter_length,
adapter_type=None,
adapter_weight=None,
act_layer=nn.GELU,
zero_init_last=True,
use_bias=True):
super(SCEAdapter, self).__init__()
self.dim = dim
self.adapter_length = adapter_length
self.adapter_type = adapter_type
self.adapter_weight = adapter_weight
self.zero_init_last = zero_init_last
self.ln1 = nn.Linear(dim, adapter_length, bias=use_bias)
self.activate = act_layer()
self.ln2 = nn.Linear(adapter_length, dim, bias=use_bias)
self.init_weights()
self.init_scaling()
def _zero_init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.zeros_(m.weight)
nn.init.zeros_(m.bias)
def _kaiming_init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
def init_weights(self):
self._kaiming_init_weights(self.ln1)
if self.zero_init_last:
self._zero_init_weights(self.ln2)
else:
self._kaiming_init_weights(self.ln2)
def init_scaling(self):
if self.adapter_weight:
self.scaling = choose_weight_type(self.adapter_weight, self.dim)
else:
self.scaling = None
def forward(self, x, x_shortcut=None, use_shortcut=True, **kwargs):
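        # Accepts token inputs (B, N, C) or feature maps (B, C, H, W); 4-D inputs are flattened to tokens for the bottleneck and reshaped back before the residual add.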
if x_shortcut is None:
x_shortcut = x
x_shape = x.shape
if len(x_shape) == 4:
b, d, h, w = x_shape
x = x.permute(0, 2, 3, 1).reshape(b, h * w, d)
out = self.ln2(self.activate(self.ln1(x)))
if self.adapter_weight:
scaling = get_weight_value(self.adapter_weight, self.scaling, out)
out = out * scaling if scaling is not None else out
if len(x_shape) == 4:
b, d, h, w = x_shape
out = out.reshape(b, h, w, -1).permute(0, 3, 1, 2).contiguous()
if use_shortcut:
out = x_shortcut + out
return out
| swift/swift/tuners/scetuning/scetuning_components.py/0 | {
"file_path": "swift/swift/tuners/scetuning/scetuning_components.py",
"repo_id": "swift",
"token_count": 1886
} | 219 |
import os
import re
import sys
import time
from datetime import datetime
from functools import partial
from typing import Type
import gradio as gr
import json
import torch
from gradio import Accordion, Tab
from swift import snapshot_download
from swift.llm import EvalArguments
from swift.ui.base import BaseUI
from swift.ui.llm_eval.eval import Eval
from swift.ui.llm_eval.model import Model
from swift.ui.llm_eval.runtime import EvalRuntime
class LLMEval(BaseUI):
group = 'llm_eval'
sub_ui = [Model, Eval, EvalRuntime]
cmd = 'eval'
locale_dict = {
'llm_eval': {
'label': {
'zh': 'LLM评测',
'en': 'LLM evaluation',
}
},
'more_params': {
'label': {
'zh': '更多参数',
'en': 'More params'
},
'info': {
'zh': '以json格式填入',
'en': 'Fill in with json format'
}
},
'evaluate': {
'value': {
'zh': '开始评测',
'en': 'Begin Evaluation'
},
},
'gpu_id': {
'label': {
'zh': '选择可用GPU',
'en': 'Choose GPU'
},
'info': {
'zh': '选择训练使用的GPU号,如CUDA不可用只能选择CPU',
'en': 'Select GPU to train'
}
},
}
choice_dict = BaseUI.get_choices_from_dataclass(EvalArguments)
default_dict = BaseUI.get_default_value_from_dataclass(EvalArguments)
arguments = BaseUI.get_argument_names(EvalArguments)
@classmethod
def do_build_ui(cls, base_tab: Type['BaseUI']):
with gr.TabItem(elem_id='llm_eval', label=''):
gpu_count = 0
default_device = 'cpu'
if torch.cuda.is_available():
gpu_count = torch.cuda.device_count()
default_device = '0'
with gr.Blocks():
model_and_template = gr.State([])
Model.build_ui(base_tab)
Eval.build_ui(base_tab)
EvalRuntime.build_ui(base_tab)
with gr.Row():
gr.Textbox(elem_id='more_params', lines=4, scale=20)
gr.Button(elem_id='evaluate', scale=2, variant='primary')
gr.Dropdown(
elem_id='gpu_id',
multiselect=True,
choices=[str(i) for i in range(gpu_count)] + ['cpu'],
value=default_device,
scale=8)
cls.element('evaluate').click(
cls.eval_model,
[value for value in cls.elements().values() if not isinstance(value, (Tab, Accordion))],
[cls.element('runtime_tab'),
cls.element('running_tasks'), model_and_template])
base_tab.element('running_tasks').change(
partial(EvalRuntime.task_changed, base_tab=base_tab), [base_tab.element('running_tasks')],
[value for value in base_tab.elements().values() if not isinstance(value, (Tab, Accordion))]
+ [cls.element('log'), model_and_template],
cancels=EvalRuntime.log_event)
EvalRuntime.element('kill_task').click(
EvalRuntime.kill_task,
[EvalRuntime.element('running_tasks')],
[EvalRuntime.element('running_tasks')] + [EvalRuntime.element('log')],
cancels=[EvalRuntime.log_event],
)
@classmethod
def eval(cls, *args):
eval_args = cls.get_default_value_from_dataclass(EvalArguments)
kwargs = {}
kwargs_is_list = {}
other_kwargs = {}
more_params = {}
keys = [key for key, value in cls.elements().items() if not isinstance(value, (Tab, Accordion))]
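        # Keep only the values the user actually changed from the EvalArguments defaults, casting numeric/bool strings back to Python types; everything else (e.g. gpu_id, more_params) is handled separately below.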
for key, value in zip(keys, args):
compare_value = eval_args.get(key)
compare_value_arg = str(compare_value) if not isinstance(compare_value, (list, dict)) else compare_value
compare_value_ui = str(value) if not isinstance(value, (list, dict)) else value
if key in eval_args and compare_value_ui != compare_value_arg and value:
if isinstance(value, str) and re.fullmatch(cls.int_regex, value):
value = int(value)
elif isinstance(value, str) and re.fullmatch(cls.float_regex, value):
value = float(value)
elif isinstance(value, str) and re.fullmatch(cls.bool_regex, value):
value = True if value.lower() == 'true' else False
kwargs[key] = value if not isinstance(value, list) else ' '.join(value)
kwargs_is_list[key] = isinstance(value, list)
else:
other_kwargs[key] = value
if key == 'more_params' and value:
more_params = json.loads(value)
kwargs.update(more_params)
if kwargs['model_type'] == cls.locale('checkpoint', cls.lang)['value']:
model_dir = kwargs.pop('model_id_or_path')
if not os.path.exists(model_dir):
model_dir = snapshot_download(model_dir)
kwargs['ckpt_dir'] = model_dir
eval_args = EvalArguments(
**{
key: value.split(' ') if key in kwargs_is_list and kwargs_is_list[key] else value
for key, value in kwargs.items()
})
params = ''
for e in kwargs:
if e in kwargs_is_list and kwargs_is_list[e]:
params += f'--{e} {kwargs[e]} '
else:
params += f'--{e} "{kwargs[e]}" '
devices = other_kwargs['gpu_id']
devices = [d for d in devices if d]
assert (len(devices) == 1 or 'cpu' not in devices)
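        # Multiple devices are only allowed when they are all CUDA ids; 'cpu' must be used on its own.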
gpus = ','.join(devices)
cuda_param = ''
if gpus != 'cpu':
cuda_param = f'CUDA_VISIBLE_DEVICES={gpus}'
now = datetime.now()
time_str = f'{now.year}{now.month}{now.day}{now.hour}{now.minute}{now.second}'
file_path = f'output/{eval_args.model_type}-{time_str}'
if not os.path.exists(file_path):
os.makedirs(file_path, exist_ok=True)
log_file = os.path.join(os.getcwd(), f'{file_path}/run_eval.log')
eval_args.log_file = log_file
params += f'--log_file "{log_file}" '
params += '--ignore_args_error true '
if sys.platform == 'win32':
if cuda_param:
cuda_param = f'set {cuda_param} && '
run_command = f'{cuda_param}start /b swift eval {params} > {log_file} 2>&1'
else:
run_command = f'{cuda_param} nohup swift eval {params} > {log_file} 2>&1 &'
return run_command, eval_args, log_file
@classmethod
def eval_model(cls, *args):
run_command, eval_args, log_file = cls.eval(*args)
os.system(run_command)
time.sleep(2)
return gr.update(open=True), EvalRuntime.refresh_tasks(log_file), [eval_args.sft_type]
| swift/swift/ui/llm_eval/llm_eval.py/0 | {
"file_path": "swift/swift/ui/llm_eval/llm_eval.py",
"repo_id": "swift",
"token_count": 3748
} | 220 |
from typing import Type
import gradio as gr
from swift.llm import MODEL_MAPPING, TEMPLATE_MAPPING, ModelType
from swift.ui.base import BaseUI
class Model(BaseUI):
group = 'llm_train'
locale_dict = {
'model_type': {
'label': {
'zh': '选择模型',
'en': 'Select Model'
},
'info': {
'zh': 'SWIFT已支持的模型名称',
'en': 'Base model supported by SWIFT'
}
},
'model_id_or_path': {
'label': {
'zh': '模型id或路径',
'en': 'Model id or path'
},
'info': {
'zh': '实际的模型id',
'en': 'The actual model id or model path'
}
},
'template_type': {
'label': {
'zh': '模型Prompt模板类型',
'en': 'Prompt template type'
},
'info': {
'zh': '选择匹配模型的Prompt模板',
'en': 'Choose the template type of the model'
}
},
'system': {
'label': {
'zh': 'system字段',
'en': 'system'
},
'info': {
'zh': '选择system字段的内容',
'en': 'Choose the content of the system field'
}
},
'reset': {
'value': {
'zh': '恢复初始值',
'en': 'Reset to default'
},
},
}
@classmethod
def do_build_ui(cls, base_tab: Type['BaseUI']):
with gr.Row():
model_type = gr.Dropdown(
elem_id='model_type', choices=ModelType.get_model_name_list() + cls.get_custom_name_list(), scale=20)
model_id_or_path = gr.Textbox(elem_id='model_id_or_path', lines=1, scale=20, interactive=True)
template_type = gr.Dropdown(
elem_id='template_type', choices=list(TEMPLATE_MAPPING.keys()) + ['AUTO'], scale=20)
reset_btn = gr.Button(elem_id='reset', scale=2)
model_state = gr.State({})
with gr.Row():
system = gr.Textbox(elem_id='system', lines=1, scale=20)
def update_input_model(choice, model_state=None):
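            # Resolve the default model path, system prompt and template for the selected model type, preferring a user-edited path cached in model_state.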
if choice is None:
return None, None, None
if model_state and choice in model_state:
model_id_or_path = model_state[choice]
else:
model_id_or_path = MODEL_MAPPING[choice]['model_id_or_path']
default_system = getattr(TEMPLATE_MAPPING[MODEL_MAPPING[choice]['template']]['template'], 'default_system',
None)
template = MODEL_MAPPING[choice]['template']
return model_id_or_path, default_system, template
def update_model_id_or_path(model_type, model_id_or_path, model_state):
if model_type is None or isinstance(model_type, list):
return model_state
model_state[model_type] = model_id_or_path
return model_state
def reset(model_type):
model_id_or_path, default_system, template = update_input_model(model_type)
return model_id_or_path, default_system, template, {}
model_type.change(
update_input_model, inputs=[model_type, model_state], outputs=[model_id_or_path, system, template_type])
model_id_or_path.change(
update_model_id_or_path, inputs=[model_type, model_id_or_path, model_state], outputs=[model_state])
reset_btn.click(reset, inputs=[model_type], outputs=[model_id_or_path, system, template_type, model_state])
| swift/swift/ui/llm_train/model.py/0 | {
"file_path": "swift/swift/ui/llm_train/model.py",
"repo_id": "swift",
"token_count": 2036
} | 221 |
import math
import unittest
import torch
from modelscope import Model, Preprocessor
from torch import nn
from swift import LoRAConfig, Swift
class TestMergedLinear(unittest.TestCase):
def test_swift_lora_forward(self):
from swift.tuners.lora import MergedLinear
def reset_parameters(self):
nn.Linear.reset_parameters(self)
if hasattr(self, 'lora_A'):
# initialize A the same way as the default for nn.Linear and B to zero
nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
nn.init.ones_(self.lora_B)
MergedLinear.reset_parameters = reset_parameters
model = Model.from_pretrained('damo/nlp_structbert_sentence-similarity_chinese-base')
preprocessor = Preprocessor.from_pretrained('damo/nlp_structbert_sentence-similarity_chinese-base')
inputs = preprocessor('how are you')
lora_config = LoRAConfig(
target_modules=['query', 'key', 'value'], use_merged_linear=True, enable_lora=[True, True, True])
outputs = model(**inputs)
model = Swift.prepare_model(model, config=lora_config)
model.eval()
outputs_lora = model(**inputs)
model.deactivate_adapter('default')
outputs_deactivate = model(**inputs)
model.activate_adapter('default')
outputs_reactivate = model(**inputs)
Swift.merge_and_unload(model)
outputs_merged = model(**inputs)
self.assertTrue(torch.allclose(outputs.logits, outputs_deactivate.logits))
self.assertTrue(not torch.allclose(outputs.logits, outputs_lora.logits))
self.assertTrue(torch.allclose(outputs_lora.logits, outputs_reactivate.logits))
self.assertTrue(torch.allclose(outputs_lora.logits, outputs_merged.logits, atol=1e-4))
| swift/tests/tuners/test_merged_linear.py/0 | {
"file_path": "swift/tests/tuners/test_merged_linear.py",
"repo_id": "swift",
"token_count": 779
} | 222 |
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns:xlink="http://www.w3.org/1999/xlink" width="350.696449pt" height="268.034375pt" viewBox="0 0 350.696449 268.034375" xmlns="http://www.w3.org/2000/svg" version="1.1">
<metadata>
<rdf:RDF xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
<cc:Work>
<dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/>
<dc:date>2023-11-18T11:28:03.028228</dc:date>
<dc:format>image/svg+xml</dc:format>
<dc:creator>
<cc:Agent>
<dc:title>Matplotlib v3.7.1, https://matplotlib.org/</dc:title>
</cc:Agent>
</dc:creator>
</cc:Work>
</rdf:RDF>
</metadata>
<defs>
<style type="text/css">*{stroke-linejoin: round; stroke-linecap: butt}</style>
</defs>
<g id="figure_1">
<g id="patch_1">
<path d="M 0 268.034375
L 350.696449 268.034375
L 350.696449 0
L 0 0
z
" style="fill: #ffffff"/>
</g>
<g id="axes_1">
<g id="patch_2">
<path d="M 7.2 244.078125
L 342 244.078125
L 342 22.318125
L 7.2 22.318125
z
" style="fill: #ffffff"/>
</g>
<g id="matplotlib.axis_1">
<g id="xtick_1">
<g id="line2d_1">
<defs>
<path id="md49eeea5b7" d="M 0 0
L 0 3.5
" style="stroke: #000000; stroke-width: 0.8"/>
</defs>
<g>
<use xlink:href="#md49eeea5b7" x="56.236364" y="244.078125" style="stroke: #000000; stroke-width: 0.8"/>
</g>
</g>
<g id="text_1">
<!-- Training Speed -->
<g transform="translate(14.12777 258.676562) scale(0.1 -0.1)">
<defs>
<path id="DejaVuSans-Bold-54" d="M 31 4666
L 4331 4666
L 4331 3756
L 2784 3756
L 2784 0
L 1581 0
L 1581 3756
L 31 3756
L 31 4666
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-72" d="M 3138 2547
Q 2991 2616 2845 2648
Q 2700 2681 2553 2681
Q 2122 2681 1889 2404
Q 1656 2128 1656 1613
L 1656 0
L 538 0
L 538 3500
L 1656 3500
L 1656 2925
Q 1872 3269 2151 3426
Q 2431 3584 2822 3584
Q 2878 3584 2943 3579
Q 3009 3575 3134 3559
L 3138 2547
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-61" d="M 2106 1575
Q 1756 1575 1579 1456
Q 1403 1338 1403 1106
Q 1403 894 1545 773
Q 1688 653 1941 653
Q 2256 653 2472 879
Q 2688 1106 2688 1447
L 2688 1575
L 2106 1575
z
M 3816 1997
L 3816 0
L 2688 0
L 2688 519
Q 2463 200 2181 54
Q 1900 -91 1497 -91
Q 953 -91 614 226
Q 275 544 275 1050
Q 275 1666 698 1953
Q 1122 2241 2028 2241
L 2688 2241
L 2688 2328
Q 2688 2594 2478 2717
Q 2269 2841 1825 2841
Q 1466 2841 1156 2769
Q 847 2697 581 2553
L 581 3406
Q 941 3494 1303 3539
Q 1666 3584 2028 3584
Q 2975 3584 3395 3211
Q 3816 2838 3816 1997
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-69" d="M 538 3500
L 1656 3500
L 1656 0
L 538 0
L 538 3500
z
M 538 4863
L 1656 4863
L 1656 3950
L 538 3950
L 538 4863
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-6e" d="M 4056 2131
L 4056 0
L 2931 0
L 2931 347
L 2931 1631
Q 2931 2084 2911 2256
Q 2891 2428 2841 2509
Q 2775 2619 2662 2680
Q 2550 2741 2406 2741
Q 2056 2741 1856 2470
Q 1656 2200 1656 1722
L 1656 0
L 538 0
L 538 3500
L 1656 3500
L 1656 2988
Q 1909 3294 2193 3439
Q 2478 3584 2822 3584
Q 3428 3584 3742 3212
Q 4056 2841 4056 2131
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-67" d="M 2919 594
Q 2688 288 2409 144
Q 2131 0 1766 0
Q 1125 0 706 504
Q 288 1009 288 1791
Q 288 2575 706 3076
Q 1125 3578 1766 3578
Q 2131 3578 2409 3434
Q 2688 3291 2919 2981
L 2919 3500
L 4044 3500
L 4044 353
Q 4044 -491 3511 -936
Q 2978 -1381 1966 -1381
Q 1638 -1381 1331 -1331
Q 1025 -1281 716 -1178
L 716 -306
Q 1009 -475 1290 -558
Q 1572 -641 1856 -641
Q 2406 -641 2662 -400
Q 2919 -159 2919 353
L 2919 594
z
M 2181 2772
Q 1834 2772 1640 2515
Q 1447 2259 1447 1791
Q 1447 1309 1634 1061
Q 1822 813 2181 813
Q 2531 813 2725 1069
Q 2919 1325 2919 1791
Q 2919 2259 2725 2515
Q 2531 2772 2181 2772
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-20" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-53" d="M 3834 4519
L 3834 3531
Q 3450 3703 3084 3790
Q 2719 3878 2394 3878
Q 1963 3878 1756 3759
Q 1550 3641 1550 3391
Q 1550 3203 1689 3098
Q 1828 2994 2194 2919
L 2706 2816
Q 3484 2659 3812 2340
Q 4141 2022 4141 1434
Q 4141 663 3683 286
Q 3225 -91 2284 -91
Q 1841 -91 1394 -6
Q 947 78 500 244
L 500 1259
Q 947 1022 1364 901
Q 1781 781 2169 781
Q 2563 781 2772 912
Q 2981 1044 2981 1288
Q 2981 1506 2839 1625
Q 2697 1744 2272 1838
L 1806 1941
Q 1106 2091 782 2419
Q 459 2747 459 3303
Q 459 4000 909 4375
Q 1359 4750 2203 4750
Q 2588 4750 2994 4692
Q 3400 4634 3834 4519
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-70" d="M 1656 506
L 1656 -1331
L 538 -1331
L 538 3500
L 1656 3500
L 1656 2988
Q 1888 3294 2169 3439
Q 2450 3584 2816 3584
Q 3463 3584 3878 3070
Q 4294 2556 4294 1747
Q 4294 938 3878 423
Q 3463 -91 2816 -91
Q 2450 -91 2169 54
Q 1888 200 1656 506
z
M 2400 2772
Q 2041 2772 1848 2508
Q 1656 2244 1656 1747
Q 1656 1250 1848 986
Q 2041 722 2400 722
Q 2759 722 2948 984
Q 3138 1247 3138 1747
Q 3138 2247 2948 2509
Q 2759 2772 2400 2772
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-65" d="M 4031 1759
L 4031 1441
L 1416 1441
Q 1456 1047 1700 850
Q 1944 653 2381 653
Q 2734 653 3104 758
Q 3475 863 3866 1075
L 3866 213
Q 3469 63 3072 -14
Q 2675 -91 2278 -91
Q 1328 -91 801 392
Q 275 875 275 1747
Q 275 2603 792 3093
Q 1309 3584 2216 3584
Q 3041 3584 3536 3087
Q 4031 2591 4031 1759
z
M 2881 2131
Q 2881 2450 2695 2645
Q 2509 2841 2209 2841
Q 1884 2841 1681 2658
Q 1478 2475 1428 2131
L 2881 2131
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-64" d="M 2919 2988
L 2919 4863
L 4044 4863
L 4044 0
L 2919 0
L 2919 506
Q 2688 197 2409 53
Q 2131 -91 1766 -91
Q 1119 -91 703 423
Q 288 938 288 1747
Q 288 2556 703 3070
Q 1119 3584 1766 3584
Q 2128 3584 2408 3439
Q 2688 3294 2919 2988
z
M 2181 722
Q 2541 722 2730 984
Q 2919 1247 2919 1747
Q 2919 2247 2730 2509
Q 2541 2772 2181 2772
Q 1825 2772 1636 2509
Q 1447 2247 1447 1747
Q 1447 1247 1636 984
Q 1825 722 2181 722
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-54"/>
<use xlink:href="#DejaVuSans-Bold-72" x="57.212891"/>
<use xlink:href="#DejaVuSans-Bold-61" x="106.529297"/>
<use xlink:href="#DejaVuSans-Bold-69" x="174.009766"/>
<use xlink:href="#DejaVuSans-Bold-6e" x="208.287109"/>
<use xlink:href="#DejaVuSans-Bold-69" x="279.478516"/>
<use xlink:href="#DejaVuSans-Bold-6e" x="313.755859"/>
<use xlink:href="#DejaVuSans-Bold-67" x="384.947266"/>
<use xlink:href="#DejaVuSans-Bold-20" x="456.529297"/>
<use xlink:href="#DejaVuSans-Bold-53" x="491.34375"/>
<use xlink:href="#DejaVuSans-Bold-70" x="563.365234"/>
<use xlink:href="#DejaVuSans-Bold-65" x="634.947266"/>
<use xlink:href="#DejaVuSans-Bold-65" x="702.769531"/>
<use xlink:href="#DejaVuSans-Bold-64" x="770.591797"/>
</g>
</g>
</g>
<g id="xtick_2">
<g id="line2d_2">
<g>
<use xlink:href="#md49eeea5b7" x="174.6" y="244.078125" style="stroke: #000000; stroke-width: 0.8"/>
</g>
</g>
<g id="text_2">
<!-- Rouge Score -->
<g transform="translate(139.1875 258.598437) scale(0.1 -0.1)">
<defs>
<path id="DejaVuSans-Bold-52" d="M 2297 2597
Q 2675 2597 2839 2737
Q 3003 2878 3003 3200
Q 3003 3519 2839 3656
Q 2675 3794 2297 3794
L 1791 3794
L 1791 2597
L 2297 2597
z
M 1791 1766
L 1791 0
L 588 0
L 588 4666
L 2425 4666
Q 3347 4666 3776 4356
Q 4206 4047 4206 3378
Q 4206 2916 3982 2619
Q 3759 2322 3309 2181
Q 3556 2125 3751 1926
Q 3947 1728 4147 1325
L 4800 0
L 3519 0
L 2950 1159
Q 2778 1509 2601 1637
Q 2425 1766 2131 1766
L 1791 1766
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-6f" d="M 2203 2784
Q 1831 2784 1636 2517
Q 1441 2250 1441 1747
Q 1441 1244 1636 976
Q 1831 709 2203 709
Q 2569 709 2762 976
Q 2956 1244 2956 1747
Q 2956 2250 2762 2517
Q 2569 2784 2203 2784
z
M 2203 3584
Q 3106 3584 3614 3096
Q 4122 2609 4122 1747
Q 4122 884 3614 396
Q 3106 -91 2203 -91
Q 1297 -91 786 396
Q 275 884 275 1747
Q 275 2609 786 3096
Q 1297 3584 2203 3584
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-75" d="M 500 1363
L 500 3500
L 1625 3500
L 1625 3150
Q 1625 2866 1622 2436
Q 1619 2006 1619 1863
Q 1619 1441 1641 1255
Q 1663 1069 1716 984
Q 1784 875 1895 815
Q 2006 756 2150 756
Q 2500 756 2700 1025
Q 2900 1294 2900 1772
L 2900 3500
L 4019 3500
L 4019 0
L 2900 0
L 2900 506
Q 2647 200 2364 54
Q 2081 -91 1741 -91
Q 1134 -91 817 281
Q 500 653 500 1363
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-63" d="M 3366 3391
L 3366 2478
Q 3138 2634 2908 2709
Q 2678 2784 2431 2784
Q 1963 2784 1702 2511
Q 1441 2238 1441 1747
Q 1441 1256 1702 982
Q 1963 709 2431 709
Q 2694 709 2930 787
Q 3166 866 3366 1019
L 3366 103
Q 3103 6 2833 -42
Q 2563 -91 2291 -91
Q 1344 -91 809 395
Q 275 881 275 1747
Q 275 2613 809 3098
Q 1344 3584 2291 3584
Q 2566 3584 2833 3536
Q 3100 3488 3366 3391
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-52"/>
<use xlink:href="#DejaVuSans-Bold-6f" x="77.001953"/>
<use xlink:href="#DejaVuSans-Bold-75" x="145.703125"/>
<use xlink:href="#DejaVuSans-Bold-67" x="216.894531"/>
<use xlink:href="#DejaVuSans-Bold-65" x="288.476562"/>
<use xlink:href="#DejaVuSans-Bold-20" x="356.298828"/>
<use xlink:href="#DejaVuSans-Bold-53" x="391.113281"/>
<use xlink:href="#DejaVuSans-Bold-63" x="463.134766"/>
<use xlink:href="#DejaVuSans-Bold-6f" x="522.412109"/>
<use xlink:href="#DejaVuSans-Bold-72" x="591.113281"/>
<use xlink:href="#DejaVuSans-Bold-65" x="640.429688"/>
</g>
</g>
</g>
<g id="xtick_3">
<g id="line2d_3">
<g>
<use xlink:href="#md49eeea5b7" x="292.963636" y="244.078125" style="stroke: #000000; stroke-width: 0.8"/>
</g>
</g>
<g id="text_3">
<!-- GPU Memory (GB) -->
<g transform="translate(242.430824 258.665625) scale(0.1 -0.1)">
<defs>
<path id="DejaVuSans-Bold-47" d="M 4781 347
Q 4331 128 3847 18
Q 3363 -91 2847 -91
Q 1681 -91 1000 561
Q 319 1213 319 2328
Q 319 3456 1012 4103
Q 1706 4750 2913 4750
Q 3378 4750 3804 4662
Q 4231 4575 4609 4403
L 4609 3438
Q 4219 3659 3833 3768
Q 3447 3878 3059 3878
Q 2341 3878 1952 3476
Q 1563 3075 1563 2328
Q 1563 1588 1938 1184
Q 2313 781 3003 781
Q 3191 781 3352 804
Q 3513 828 3641 878
L 3641 1784
L 2906 1784
L 2906 2591
L 4781 2591
L 4781 347
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-50" d="M 588 4666
L 2584 4666
Q 3475 4666 3951 4270
Q 4428 3875 4428 3144
Q 4428 2409 3951 2014
Q 3475 1619 2584 1619
L 1791 1619
L 1791 0
L 588 0
L 588 4666
z
M 1791 3794
L 1791 2491
L 2456 2491
Q 2806 2491 2997 2661
Q 3188 2831 3188 3144
Q 3188 3456 2997 3625
Q 2806 3794 2456 3794
L 1791 3794
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-55" d="M 588 4666
L 1791 4666
L 1791 1869
Q 1791 1291 1980 1042
Q 2169 794 2597 794
Q 3028 794 3217 1042
Q 3406 1291 3406 1869
L 3406 4666
L 4609 4666
L 4609 1869
Q 4609 878 4112 393
Q 3616 -91 2597 -91
Q 1581 -91 1084 393
Q 588 878 588 1869
L 588 4666
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-4d" d="M 588 4666
L 2119 4666
L 3181 2169
L 4250 4666
L 5778 4666
L 5778 0
L 4641 0
L 4641 3413
L 3566 897
L 2803 897
L 1728 3413
L 1728 0
L 588 0
L 588 4666
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-6d" d="M 3781 2919
Q 3994 3244 4286 3414
Q 4578 3584 4928 3584
Q 5531 3584 5847 3212
Q 6163 2841 6163 2131
L 6163 0
L 5038 0
L 5038 1825
Q 5041 1866 5042 1909
Q 5044 1953 5044 2034
Q 5044 2406 4934 2573
Q 4825 2741 4581 2741
Q 4263 2741 4089 2478
Q 3916 2216 3909 1719
L 3909 0
L 2784 0
L 2784 1825
Q 2784 2406 2684 2573
Q 2584 2741 2328 2741
Q 2006 2741 1831 2477
Q 1656 2213 1656 1722
L 1656 0
L 531 0
L 531 3500
L 1656 3500
L 1656 2988
Q 1863 3284 2130 3434
Q 2397 3584 2719 3584
Q 3081 3584 3359 3409
Q 3638 3234 3781 2919
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-79" d="M 78 3500
L 1197 3500
L 2138 1125
L 2938 3500
L 4056 3500
L 2584 -331
Q 2363 -916 2067 -1148
Q 1772 -1381 1288 -1381
L 641 -1381
L 641 -647
L 991 -647
Q 1275 -647 1404 -556
Q 1534 -466 1606 -231
L 1638 -134
L 78 3500
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-28" d="M 2413 -844
L 1484 -844
Q 1006 -72 778 623
Q 550 1319 550 2003
Q 550 2688 779 3389
Q 1009 4091 1484 4856
L 2413 4856
Q 2013 4116 1813 3408
Q 1613 2700 1613 2009
Q 1613 1319 1811 609
Q 2009 -100 2413 -844
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-42" d="M 2456 2859
Q 2741 2859 2887 2984
Q 3034 3109 3034 3353
Q 3034 3594 2887 3720
Q 2741 3847 2456 3847
L 1791 3847
L 1791 2859
L 2456 2859
z
M 2497 819
Q 2859 819 3042 972
Q 3225 1125 3225 1434
Q 3225 1738 3044 1889
Q 2863 2041 2497 2041
L 1791 2041
L 1791 819
L 2497 819
z
M 3616 2497
Q 4003 2384 4215 2081
Q 4428 1778 4428 1338
Q 4428 663 3972 331
Q 3516 0 2584 0
L 588 0
L 588 4666
L 2394 4666
Q 3366 4666 3802 4372
Q 4238 4078 4238 3431
Q 4238 3091 4078 2852
Q 3919 2613 3616 2497
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-29" d="M 513 -844
Q 913 -100 1113 609
Q 1313 1319 1313 2009
Q 1313 2700 1113 3408
Q 913 4116 513 4856
L 1441 4856
Q 1916 4091 2145 3389
Q 2375 2688 2375 2003
Q 2375 1319 2147 623
Q 1919 -72 1441 -844
L 513 -844
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-47"/>
<use xlink:href="#DejaVuSans-Bold-50" x="82.080078"/>
<use xlink:href="#DejaVuSans-Bold-55" x="155.371094"/>
<use xlink:href="#DejaVuSans-Bold-20" x="236.572266"/>
<use xlink:href="#DejaVuSans-Bold-4d" x="271.386719"/>
<use xlink:href="#DejaVuSans-Bold-65" x="370.898438"/>
<use xlink:href="#DejaVuSans-Bold-6d" x="438.720703"/>
<use xlink:href="#DejaVuSans-Bold-6f" x="542.919922"/>
<use xlink:href="#DejaVuSans-Bold-72" x="611.621094"/>
<use xlink:href="#DejaVuSans-Bold-79" x="660.9375"/>
<use xlink:href="#DejaVuSans-Bold-20" x="726.123047"/>
<use xlink:href="#DejaVuSans-Bold-28" x="760.9375"/>
<use xlink:href="#DejaVuSans-Bold-47" x="806.640625"/>
<use xlink:href="#DejaVuSans-Bold-42" x="888.720703"/>
<use xlink:href="#DejaVuSans-Bold-29" x="964.941406"/>
</g>
</g>
</g>
</g>
<g id="patch_3">
<path d="M 22.418182 244.078125
L 56.236364 244.078125
L 56.236364 195.339663
L 22.418182 195.339663
z
" clip-path="url(#p080f205d85)" style="fill: #6baed6"/>
</g>
<g id="patch_4">
<path d="M 140.781818 244.078125
L 174.6 244.078125
L 174.6 146.601202
L 140.781818 146.601202
z
" clip-path="url(#p080f205d85)" style="fill: #6baed6"/>
</g>
<g id="patch_5">
<path d="M 259.145455 244.078125
L 292.963636 244.078125
L 292.963636 205.087356
L 259.145455 205.087356
z
" clip-path="url(#p080f205d85)" style="fill: #6baed6"/>
</g>
<g id="patch_6">
<path d="M 56.236364 244.078125
L 90.054545 244.078125
L 90.054545 32.878125
L 56.236364 32.878125
z
" clip-path="url(#p080f205d85)" style="fill: #3182bd"/>
</g>
<g id="patch_7">
<path d="M 174.6 244.078125
L 208.418182 244.078125
L 208.418182 130.355048
L 174.6 130.355048
z
" clip-path="url(#p080f205d85)" style="fill: #3182bd"/>
</g>
<g id="patch_8">
<path d="M 292.963636 244.078125
L 326.781818 244.078125
L 326.781818 218.084279
L 292.963636 218.084279
z
" clip-path="url(#p080f205d85)" style="fill: #3182bd"/>
</g>
<g id="patch_9">
<path d="M 7.2 244.078125
L 342 244.078125
" style="fill: none; stroke: #dddddd; stroke-width: 0.8; stroke-linejoin: miter; stroke-linecap: square"/>
</g>
<g id="text_4">
<!-- 5.81 -->
<g transform="translate(26.991335 193.259976) scale(0.1 -0.1)">
<defs>
<path id="DejaVuSans-Bold-35" d="M 678 4666
L 3669 4666
L 3669 3781
L 1638 3781
L 1638 3059
Q 1775 3097 1914 3117
Q 2053 3138 2203 3138
Q 3056 3138 3531 2711
Q 4006 2284 4006 1522
Q 4006 766 3489 337
Q 2972 -91 2053 -91
Q 1656 -91 1267 -14
Q 878 63 494 219
L 494 1166
Q 875 947 1217 837
Q 1559 728 1863 728
Q 2300 728 2551 942
Q 2803 1156 2803 1522
Q 2803 1891 2551 2103
Q 2300 2316 1863 2316
Q 1603 2316 1309 2248
Q 1016 2181 678 2041
L 678 4666
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-2e" d="M 653 1209
L 1778 1209
L 1778 0
L 653 0
L 653 1209
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-38" d="M 2228 2088
Q 1891 2088 1709 1903
Q 1528 1719 1528 1375
Q 1528 1031 1709 848
Q 1891 666 2228 666
Q 2563 666 2741 848
Q 2919 1031 2919 1375
Q 2919 1722 2741 1905
Q 2563 2088 2228 2088
z
M 1350 2484
Q 925 2613 709 2878
Q 494 3144 494 3541
Q 494 4131 934 4440
Q 1375 4750 2228 4750
Q 3075 4750 3515 4442
Q 3956 4134 3956 3541
Q 3956 3144 3739 2878
Q 3522 2613 3097 2484
Q 3572 2353 3814 2058
Q 4056 1763 4056 1313
Q 4056 619 3595 264
Q 3134 -91 2228 -91
Q 1319 -91 855 264
Q 391 619 391 1313
Q 391 1763 633 2058
Q 875 2353 1350 2484
z
M 1631 3419
Q 1631 3141 1786 2991
Q 1941 2841 2228 2841
Q 2509 2841 2662 2991
Q 2816 3141 2816 3419
Q 2816 3697 2662 3845
Q 2509 3994 2228 3994
Q 1941 3994 1786 3844
Q 1631 3694 1631 3419
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-31" d="M 750 831
L 1813 831
L 1813 3847
L 722 3622
L 722 4441
L 1806 4666
L 2950 4666
L 2950 831
L 4013 831
L 4013 0
L 750 0
L 750 831
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-35"/>
<use xlink:href="#DejaVuSans-Bold-2e" x="69.580078"/>
<use xlink:href="#DejaVuSans-Bold-38" x="107.568359"/>
<use xlink:href="#DejaVuSans-Bold-31" x="177.148438"/>
</g>
</g>
<g id="text_5">
<!-- 7.20 -->
<g transform="translate(145.354972 144.521514) scale(0.1 -0.1)">
<defs>
<path id="DejaVuSans-Bold-37" d="M 428 4666
L 3944 4666
L 3944 3988
L 2125 0
L 953 0
L 2675 3781
L 428 3781
L 428 4666
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-32" d="M 1844 884
L 3897 884
L 3897 0
L 506 0
L 506 884
L 2209 2388
Q 2438 2594 2547 2791
Q 2656 2988 2656 3200
Q 2656 3528 2436 3728
Q 2216 3928 1850 3928
Q 1569 3928 1234 3808
Q 900 3688 519 3450
L 519 4475
Q 925 4609 1322 4679
Q 1719 4750 2100 4750
Q 2938 4750 3402 4381
Q 3866 4013 3866 3353
Q 3866 2972 3669 2642
Q 3472 2313 2841 1759
L 1844 884
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-30" d="M 2944 2338
Q 2944 3213 2780 3570
Q 2616 3928 2228 3928
Q 1841 3928 1675 3570
Q 1509 3213 1509 2338
Q 1509 1453 1675 1090
Q 1841 728 2228 728
Q 2613 728 2778 1090
Q 2944 1453 2944 2338
z
M 4147 2328
Q 4147 1169 3647 539
Q 3147 -91 2228 -91
Q 1306 -91 806 539
Q 306 1169 306 2328
Q 306 3491 806 4120
Q 1306 4750 2228 4750
Q 3147 4750 3647 4120
Q 4147 3491 4147 2328
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-37"/>
<use xlink:href="#DejaVuSans-Bold-2e" x="69.580078"/>
<use xlink:href="#DejaVuSans-Bold-32" x="107.568359"/>
<use xlink:href="#DejaVuSans-Bold-30" x="177.148438"/>
</g>
</g>
<g id="text_6">
<!-- 5.78 -->
<g transform="translate(263.718608 203.007668) scale(0.1 -0.1)">
<use xlink:href="#DejaVuSans-Bold-35"/>
<use xlink:href="#DejaVuSans-Bold-2e" x="69.580078"/>
<use xlink:href="#DejaVuSans-Bold-37" x="107.568359"/>
<use xlink:href="#DejaVuSans-Bold-38" x="177.148438"/>
</g>
</g>
<g id="text_7">
<!-- 21.67 -->
<g transform="translate(57.330611 30.798438) scale(0.1 -0.1)">
<defs>
<path id="DejaVuSans-Bold-36" d="M 2316 2303
Q 2000 2303 1842 2098
Q 1684 1894 1684 1484
Q 1684 1075 1842 870
Q 2000 666 2316 666
Q 2634 666 2792 870
Q 2950 1075 2950 1484
Q 2950 1894 2792 2098
Q 2634 2303 2316 2303
z
M 3803 4544
L 3803 3681
Q 3506 3822 3243 3889
Q 2981 3956 2731 3956
Q 2194 3956 1894 3657
Q 1594 3359 1544 2772
Q 1750 2925 1990 3001
Q 2231 3078 2516 3078
Q 3231 3078 3670 2659
Q 4109 2241 4109 1563
Q 4109 813 3618 361
Q 3128 -91 2303 -91
Q 1394 -91 895 523
Q 397 1138 397 2266
Q 397 3422 980 4083
Q 1563 4744 2578 4744
Q 2900 4744 3203 4694
Q 3506 4644 3803 4544
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-32"/>
<use xlink:href="#DejaVuSans-Bold-31" x="69.580078"/>
<use xlink:href="#DejaVuSans-Bold-2e" x="139.160156"/>
<use xlink:href="#DejaVuSans-Bold-36" x="177.148438"/>
<use xlink:href="#DejaVuSans-Bold-37" x="246.728516"/>
</g>
</g>
<g id="text_8">
<!-- 7.36 -->
<g transform="translate(179.173153 128.275361) scale(0.1 -0.1)">
<defs>
<path id="DejaVuSans-Bold-33" d="M 2981 2516
Q 3453 2394 3698 2092
Q 3944 1791 3944 1325
Q 3944 631 3412 270
Q 2881 -91 1863 -91
Q 1503 -91 1142 -33
Q 781 25 428 141
L 428 1069
Q 766 900 1098 814
Q 1431 728 1753 728
Q 2231 728 2486 893
Q 2741 1059 2741 1369
Q 2741 1688 2480 1852
Q 2219 2016 1709 2016
L 1228 2016
L 1228 2791
L 1734 2791
Q 2188 2791 2409 2933
Q 2631 3075 2631 3366
Q 2631 3634 2415 3781
Q 2200 3928 1806 3928
Q 1516 3928 1219 3862
Q 922 3797 628 3669
L 628 4550
Q 984 4650 1334 4700
Q 1684 4750 2022 4750
Q 2931 4750 3382 4451
Q 3834 4153 3834 3553
Q 3834 3144 3618 2883
Q 3403 2622 2981 2516
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-37"/>
<use xlink:href="#DejaVuSans-Bold-2e" x="69.580078"/>
<use xlink:href="#DejaVuSans-Bold-33" x="107.568359"/>
<use xlink:href="#DejaVuSans-Bold-36" x="177.148438"/>
</g>
</g>
<g id="text_9">
<!-- 5.14 -->
<g transform="translate(297.53679 216.004591) scale(0.1 -0.1)">
<defs>
<path id="DejaVuSans-Bold-34" d="M 2356 3675
L 1038 1722
L 2356 1722
L 2356 3675
z
M 2156 4666
L 3494 4666
L 3494 1722
L 4159 1722
L 4159 850
L 3494 850
L 3494 0
L 2356 0
L 2356 850
L 288 850
L 288 1881
L 2156 4666
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-35"/>
<use xlink:href="#DejaVuSans-Bold-2e" x="69.580078"/>
<use xlink:href="#DejaVuSans-Bold-31" x="107.568359"/>
<use xlink:href="#DejaVuSans-Bold-34" x="177.148438"/>
</g>
</g>
<g id="text_10">
<!-- ChatGLM2-6B - - 1×A100 -->
<g transform="translate(93.349688 16.318125) scale(0.12 -0.12)">
<defs>
<path id="DejaVuSans-Bold-43" d="M 4288 256
Q 3956 84 3597 -3
Q 3238 -91 2847 -91
Q 1681 -91 1000 561
Q 319 1213 319 2328
Q 319 3447 1000 4098
Q 1681 4750 2847 4750
Q 3238 4750 3597 4662
Q 3956 4575 4288 4403
L 4288 3438
Q 3953 3666 3628 3772
Q 3303 3878 2944 3878
Q 2300 3878 1931 3465
Q 1563 3053 1563 2328
Q 1563 1606 1931 1193
Q 2300 781 2944 781
Q 3303 781 3628 887
Q 3953 994 4288 1222
L 4288 256
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-68" d="M 4056 2131
L 4056 0
L 2931 0
L 2931 347
L 2931 1625
Q 2931 2084 2911 2256
Q 2891 2428 2841 2509
Q 2775 2619 2662 2680
Q 2550 2741 2406 2741
Q 2056 2741 1856 2470
Q 1656 2200 1656 1722
L 1656 0
L 538 0
L 538 4863
L 1656 4863
L 1656 2988
Q 1909 3294 2193 3439
Q 2478 3584 2822 3584
Q 3428 3584 3742 3212
Q 4056 2841 4056 2131
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-74" d="M 1759 4494
L 1759 3500
L 2913 3500
L 2913 2700
L 1759 2700
L 1759 1216
Q 1759 972 1856 886
Q 1953 800 2241 800
L 2816 800
L 2816 0
L 1856 0
Q 1194 0 917 276
Q 641 553 641 1216
L 641 2700
L 84 2700
L 84 3500
L 641 3500
L 641 4494
L 1759 4494
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-4c" d="M 588 4666
L 1791 4666
L 1791 909
L 3903 909
L 3903 0
L 588 0
L 588 4666
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-2d" d="M 347 2297
L 2309 2297
L 2309 1388
L 347 1388
L 347 2297
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-d7" d="M 4563 3359
L 3206 2003
L 4563 653
L 4038 128
L 2681 1478
L 1325 128
L 800 653
L 2156 2003
L 800 3359
L 1325 3884
L 2681 2528
L 4038 3884
L 4563 3359
z
" transform="scale(0.015625)"/>
<path id="DejaVuSans-Bold-41" d="M 3419 850
L 1538 850
L 1241 0
L 31 0
L 1759 4666
L 3194 4666
L 4922 0
L 3713 0
L 3419 850
z
M 1838 1716
L 3116 1716
L 2478 3572
L 1838 1716
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-43"/>
<use xlink:href="#DejaVuSans-Bold-68" x="73.388672"/>
<use xlink:href="#DejaVuSans-Bold-61" x="144.580078"/>
<use xlink:href="#DejaVuSans-Bold-74" x="212.060547"/>
<use xlink:href="#DejaVuSans-Bold-47" x="259.863281"/>
<use xlink:href="#DejaVuSans-Bold-4c" x="341.943359"/>
<use xlink:href="#DejaVuSans-Bold-4d" x="405.664062"/>
<use xlink:href="#DejaVuSans-Bold-32" x="505.175781"/>
<use xlink:href="#DejaVuSans-Bold-2d" x="574.755859"/>
<use xlink:href="#DejaVuSans-Bold-36" x="616.259766"/>
<use xlink:href="#DejaVuSans-Bold-42" x="685.839844"/>
<use xlink:href="#DejaVuSans-Bold-20" x="762.060547"/>
<use xlink:href="#DejaVuSans-Bold-2d" x="796.875"/>
<use xlink:href="#DejaVuSans-Bold-2d" x="838.378906"/>
<use xlink:href="#DejaVuSans-Bold-20" x="879.882812"/>
<use xlink:href="#DejaVuSans-Bold-31" x="914.697266"/>
<use xlink:href="#DejaVuSans-Bold-d7" x="984.277344"/>
<use xlink:href="#DejaVuSans-Bold-41" x="1068.066406"/>
<use xlink:href="#DejaVuSans-Bold-31" x="1145.458984"/>
<use xlink:href="#DejaVuSans-Bold-30" x="1215.039062"/>
<use xlink:href="#DejaVuSans-Bold-30" x="1284.619141"/>
</g>
</g>
<g id="legend_1">
<g id="patch_10">
<path d="M 201.507812 59.830625
L 335 59.830625
Q 337 59.830625 337 57.830625
L 337 29.318125
Q 337 27.318125 335 27.318125
L 201.507812 27.318125
Q 199.507812 27.318125 199.507812 29.318125
L 199.507812 57.830625
Q 199.507812 59.830625 201.507812 59.830625
L 201.507812 59.830625
z
" style="fill: none; opacity: 0"/>
</g>
<g id="patch_11">
<path d="M 203.507812 38.916562
L 223.507812 38.916562
L 223.507812 31.916562
L 203.507812 31.916562
z
" style="fill: #6baed6"/>
</g>
<g id="text_11">
<!-- ChatGLM P-Tuning -->
<g transform="translate(231.507812 38.916562) scale(0.1 -0.1)">
<use xlink:href="#DejaVuSans-Bold-43"/>
<use xlink:href="#DejaVuSans-Bold-68" x="73.388672"/>
<use xlink:href="#DejaVuSans-Bold-61" x="144.580078"/>
<use xlink:href="#DejaVuSans-Bold-74" x="212.060547"/>
<use xlink:href="#DejaVuSans-Bold-47" x="259.863281"/>
<use xlink:href="#DejaVuSans-Bold-4c" x="341.943359"/>
<use xlink:href="#DejaVuSans-Bold-4d" x="405.664062"/>
<use xlink:href="#DejaVuSans-Bold-20" x="505.175781"/>
<use xlink:href="#DejaVuSans-Bold-50" x="539.990234"/>
<use xlink:href="#DejaVuSans-Bold-2d" x="611.53125"/>
<use xlink:href="#DejaVuSans-Bold-54" x="638.285156"/>
<use xlink:href="#DejaVuSans-Bold-75" x="695.498047"/>
<use xlink:href="#DejaVuSans-Bold-6e" x="766.689453"/>
<use xlink:href="#DejaVuSans-Bold-69" x="837.880859"/>
<use xlink:href="#DejaVuSans-Bold-6e" x="872.158203"/>
<use xlink:href="#DejaVuSans-Bold-67" x="943.349609"/>
</g>
</g>
<g id="patch_12">
<path d="M 203.507812 53.672812
L 223.507812 53.672812
L 223.507812 46.672812
L 203.507812 46.672812
z
" style="fill: #3182bd"/>
</g>
<g id="text_12">
<!-- LLaMA-Factory -->
<g transform="translate(231.507812 53.672812) scale(0.1 -0.1)">
<defs>
<path id="DejaVuSans-Bold-46" d="M 588 4666
L 3834 4666
L 3834 3756
L 1791 3756
L 1791 2888
L 3713 2888
L 3713 1978
L 1791 1978
L 1791 0
L 588 0
L 588 4666
z
" transform="scale(0.015625)"/>
</defs>
<use xlink:href="#DejaVuSans-Bold-4c"/>
<use xlink:href="#DejaVuSans-Bold-4c" x="63.720703"/>
<use xlink:href="#DejaVuSans-Bold-61" x="127.441406"/>
<use xlink:href="#DejaVuSans-Bold-4d" x="194.921875"/>
<use xlink:href="#DejaVuSans-Bold-41" x="294.433594"/>
<use xlink:href="#DejaVuSans-Bold-2d" x="371.826172"/>
<use xlink:href="#DejaVuSans-Bold-46" x="413.330078"/>
<use xlink:href="#DejaVuSans-Bold-61" x="475.765625"/>
<use xlink:href="#DejaVuSans-Bold-63" x="543.246094"/>
<use xlink:href="#DejaVuSans-Bold-74" x="602.523438"/>
<use xlink:href="#DejaVuSans-Bold-6f" x="650.326172"/>
<use xlink:href="#DejaVuSans-Bold-72" x="719.027344"/>
<use xlink:href="#DejaVuSans-Bold-79" x="768.34375"/>
</g>
</g>
</g>
</g>
</g>
<defs>
<clipPath id="p080f205d85">
<rect x="7.2" y="22.318125" width="334.8" height="221.76"/>
</clipPath>
</defs>
</svg>
| LLaMA-Factory/assets/benchmark.svg/0 | {
"file_path": "LLaMA-Factory/assets/benchmark.svg",
"repo_id": "LLaMA-Factory",
"token_count": 15414
} | 0 |
services:
llamafactory:
build:
dockerfile: ./docker/docker-npu/Dockerfile
context: ../..
args:
INSTALL_DEEPSPEED: false
PIP_INDEX: https://pypi.org/simple
container_name: llamafactory
volumes:
- ../../hf_cache:/root/.cache/huggingface
- ../../ms_cache:/root/.cache/modelscope
- ../../data:/app/data
- ../../output:/app/output
- /usr/local/dcmi:/usr/local/dcmi
- /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
- /usr/local/Ascend/driver:/usr/local/Ascend/driver
- /etc/ascend_install.info:/etc/ascend_install.info
ports:
- "7860:7860"
- "8000:8000"
ipc: host
tty: true
stdin_open: true
command: bash
devices:
- /dev/davinci0
- /dev/davinci_manager
- /dev/devmm_svm
- /dev/hisi_hdc
restart: unless-stopped
| LLaMA-Factory/docker/docker-npu/docker-compose.yml/0 | {
"file_path": "LLaMA-Factory/docker/docker-npu/docker-compose.yml",
"repo_id": "LLaMA-Factory",
"token_count": 433
} | 1 |
{
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"zero_allow_untested_optimizer": true,
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"zero_optimization": {
"stage": 3,
"overlap_comm": true,
"contiguous_gradients": true,
"sub_group_size": 1e9,
"reduce_bucket_size": "auto",
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": 1e9,
"stage3_max_reuse_distance": 1e9,
"stage3_gather_16bit_weights_on_model_save": true
}
} | LLaMA-Factory/examples/deepspeed/ds_z3_config.json/0 | {
"file_path": "LLaMA-Factory/examples/deepspeed/ds_z3_config.json",
"repo_id": "LLaMA-Factory",
"token_count": 351
} | 2 |
### Note: DO NOT use quantized model or quantization_bit when merging lora adapters
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
template: llama3
finetuning_type: lora
### export
export_dir: models/llama3_lora_sft
export_size: 2
export_device: cpu
export_legacy_format: false
| LLaMA-Factory/examples/merge_lora/llama3_lora_sft.yaml/0 | {
"file_path": "LLaMA-Factory/examples/merge_lora/llama3_lora_sft.yaml",
"repo_id": "LLaMA-Factory",
"token_count": 130
} | 3 |
# coding=utf-8
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from dataclasses import dataclass
from typing import Any, Dict, Literal, Optional, Sequence
import fire
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import DataCollatorForLanguageModeling, DataCollatorForSeq2Seq
from llamafactory.data import get_dataset
from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.hparams import get_train_args
from llamafactory.model import load_model, load_tokenizer
@dataclass
class PairwiseDataCollatorWithPadding(DataCollatorForSeq2Seq):
r"""
Data collator for pairwise data.
"""
train_on_prompt: bool = False
def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
r"""
Pads batched data to the longest sequence in the batch.
We generate 2 * n examples where the first n examples represent chosen examples and
the last n examples represent rejected examples.
"""
chosen_features = []
for feature in features:
prompt_len, answer_len = len(feature["prompt_ids"]), len(feature["chosen_ids"])
input_ids = feature["prompt_ids"] + feature["chosen_ids"]
attention_mask = [1] * (prompt_len + answer_len)
labels = input_ids if self.train_on_prompt else [IGNORE_INDEX] * prompt_len + feature["chosen_ids"]
chosen_features.append({"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels})
return super().__call__(chosen_features)
def cal_ppl(
model_name_or_path: str,
save_name: str,
batch_size: int = 4,
stage: Literal["pt", "sft", "rm"] = "sft",
dataset: str = "alpaca_en",
dataset_dir: str = "data",
template: str = "default",
cutoff_len: int = 1024,
max_samples: Optional[int] = None,
train_on_prompt: bool = False,
):
r"""
Calculates the ppl on the dataset of the pre-trained models.
Usage: python cal_ppl.py --model_name_or_path path_to_model --save_name ppl.json
"""
model_args, data_args, training_args, finetuning_args, _ = get_train_args(
dict(
stage=stage,
model_name_or_path=model_name_or_path,
dataset=dataset,
dataset_dir=dataset_dir,
template=template,
cutoff_len=cutoff_len,
max_samples=max_samples,
train_on_prompt=train_on_prompt,
output_dir="dummy_dir",
overwrite_cache=True,
)
)
tokenizer_module = load_tokenizer(model_args)
tokenizer = tokenizer_module["tokenizer"]
trainset = get_dataset(model_args, data_args, training_args, stage, **tokenizer_module)
model = load_model(tokenizer, model_args, finetuning_args, is_trainable=False)
if stage == "pt":
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
elif stage == "sft":
data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX)
elif stage == "rm":
data_collator = PairwiseDataCollatorWithPadding(
tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX, train_on_prompt=train_on_prompt
)
else:
raise NotImplementedError
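    # shuffle=False keeps the saved per-sample perplexities aligned with the original dataset order.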
dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True)
criterion = torch.nn.CrossEntropyLoss(reduction="none")
total_ppl = 0
perplexities = []
batch: Dict[str, "torch.Tensor"]
with torch.no_grad():
for batch in tqdm(dataloader):
batch = batch.to(model.device)
outputs = model(**batch)
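            # Shift logits/labels so tokens < n predict token n; positions labeled IGNORE_INDEX are masked out of the loss.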
shift_logits: "torch.Tensor" = outputs["logits"][..., :-1, :]
shift_labels: "torch.Tensor" = batch["labels"][..., 1:]
loss_mask = shift_labels != IGNORE_INDEX
flatten_logits = shift_logits.contiguous().view(shift_labels.size(0) * shift_labels.size(1), -1)
flatten_labels = shift_labels.contiguous().view(-1)
token_logps: "torch.Tensor" = criterion(flatten_logits, flatten_labels)
token_logps = token_logps.contiguous().view(shift_logits.size(0), -1)
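            # Average the token-level cross-entropy over non-ignored positions; exp() of this mean is the per-sample perplexity.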
sentence_logps = (token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
total_ppl += sentence_logps.exp().sum().item()
perplexities.extend(sentence_logps.exp().tolist())
with open(save_name, "w", encoding="utf-8") as f:
json.dump(perplexities, f, indent=2)
print("Average perplexity is {:.2f}".format(total_ppl / len(perplexities)))
print("Perplexities have been saved at {}.".format(save_name))
if __name__ == "__main__":
fire.Fire(cal_ppl)
| LLaMA-Factory/scripts/cal_ppl.py/0 | {
"file_path": "LLaMA-Factory/scripts/cal_ppl.py",
"repo_id": "LLaMA-Factory",
"token_count": 2136
} | 4 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LOCALES = {
"lang": {
"en": {
"label": "Lang",
},
"ru": {
"label": "Русский",
},
"zh": {
"label": "语言",
},
},
"model_name": {
"en": {
"label": "Model name",
},
"ru": {
"label": "Название модели",
},
"zh": {
"label": "模型名称",
},
},
"model_path": {
"en": {
"label": "Model path",
"info": "Path to pretrained model or model identifier from Hugging Face.",
},
"ru": {
"label": "Путь к модели",
"info": "Путь к предварительно обученной модели или идентификатор модели от Hugging Face.",
},
"zh": {
"label": "模型路径",
"info": "本地模型的文件路径或 Hugging Face 的模型标识符。",
},
},
"finetuning_type": {
"en": {
"label": "Finetuning method",
},
"ru": {
"label": "Метод дообучения",
},
"zh": {
"label": "微调方法",
},
},
"checkpoint_path": {
"en": {
"label": "Checkpoint path",
},
"ru": {
"label": "Путь контрольной точки",
},
"zh": {
"label": "检查点路径",
},
},
"advanced_tab": {
"en": {
"label": "Advanced configurations",
},
"ru": {
"label": "Расширенные конфигурации",
},
"zh": {
"label": "高级设置",
},
},
"quantization_bit": {
"en": {
"label": "Quantization bit",
"info": "Enable quantization (QLoRA).",
},
"ru": {
"label": "Уровень квантования",
"info": "Включить квантование (QLoRA).",
},
"zh": {
"label": "量化等级",
"info": "启用量化(QLoRA)。",
},
},
"quantization_method": {
"en": {
"label": "Quantization method",
"info": "Quantization algorithm to use.",
},
"ru": {
"label": "Метод квантования",
"info": "Алгоритм квантования, который следует использовать.",
},
"zh": {
"label": "量化方法",
"info": "使用的量化算法。",
},
},
"template": {
"en": {
"label": "Prompt template",
"info": "The template used in constructing prompts.",
},
"ru": {
"label": "Шаблон запроса",
"info": "Шаблон, используемый при формировании запросов.",
},
"zh": {
"label": "提示模板",
"info": "构建提示词时使用的模板",
},
},
"rope_scaling": {
"en": {
"label": "RoPE scaling",
},
"ru": {
"label": "Масштабирование RoPE",
},
"zh": {
"label": "RoPE 插值方法",
},
},
"booster": {
"en": {
"label": "Booster",
},
"ru": {
"label": "Ускоритель",
},
"zh": {
"label": "加速方式",
},
},
"visual_inputs": {
"en": {
"label": "Visual inputs",
},
"ru": {
"label": "визуальные входы",
},
"zh": {
"label": "图像输入",
},
},
"training_stage": {
"en": {
"label": "Stage",
"info": "The stage to perform in training.",
},
"ru": {
"label": "Этап",
"info": "Этап выполнения обучения.",
},
"zh": {
"label": "训练阶段",
"info": "目前采用的训练方式。",
},
},
"dataset_dir": {
"en": {
"label": "Data dir",
"info": "Path to the data directory.",
},
"ru": {
"label": "Директория данных",
"info": "Путь к директории данных.",
},
"zh": {
"label": "数据路径",
"info": "数据文件夹的路径。",
},
},
"dataset": {
"en": {
"label": "Dataset",
},
"ru": {
"label": "Набор данных",
},
"zh": {
"label": "数据集",
},
},
"data_preview_btn": {
"en": {
"value": "Preview dataset",
},
"ru": {
"value": "Просмотреть набор данных",
},
"zh": {
"value": "预览数据集",
},
},
"preview_count": {
"en": {
"label": "Count",
},
"ru": {
"label": "Количество",
},
"zh": {
"label": "数量",
},
},
"page_index": {
"en": {
"label": "Page",
},
"ru": {
"label": "Страница",
},
"zh": {
"label": "页数",
},
},
"prev_btn": {
"en": {
"value": "Prev",
},
"ru": {
"value": "Предыдущая",
},
"zh": {
"value": "上一页",
},
},
"next_btn": {
"en": {
"value": "Next",
},
"ru": {
"value": "Следующая",
},
"zh": {
"value": "下一页",
},
},
"close_btn": {
"en": {
"value": "Close",
},
"ru": {
"value": "Закрыть",
},
"zh": {
"value": "关闭",
},
},
"preview_samples": {
"en": {
"label": "Samples",
},
"ru": {
"label": "Примеры",
},
"zh": {
"label": "样例",
},
},
"learning_rate": {
"en": {
"label": "Learning rate",
"info": "Initial learning rate for AdamW.",
},
"ru": {
"label": "Скорость обучения",
"info": "Начальная скорость обучения для AdamW.",
},
"zh": {
"label": "学习率",
"info": "AdamW 优化器的初始学习率。",
},
},
"num_train_epochs": {
"en": {
"label": "Epochs",
"info": "Total number of training epochs to perform.",
},
"ru": {
"label": "Эпохи",
"info": "Общее количество эпох обучения.",
},
"zh": {
"label": "训练轮数",
"info": "需要执行的训练总轮数。",
},
},
"max_grad_norm": {
"en": {
"label": "Maximum gradient norm",
"info": "Norm for gradient clipping.",
},
"ru": {
"label": "Максимальная норма градиента",
"info": "Норма для обрезки градиента.",
},
"zh": {
"label": "最大梯度范数",
"info": "用于梯度裁剪的范数。",
},
},
"max_samples": {
"en": {
"label": "Max samples",
"info": "Maximum samples per dataset.",
},
"ru": {
"label": "Максимальное количество образцов",
"info": "Максимальное количество образцов на набор данных.",
},
"zh": {
"label": "最大样本数",
"info": "每个数据集的最大样本数。",
},
},
"compute_type": {
"en": {
"label": "Compute type",
"info": "Whether to use mixed precision training.",
},
"ru": {
"label": "Тип вычислений",
"info": "Использовать ли обучение смешанной точности.",
},
"zh": {
"label": "计算类型",
"info": "是否使用混合精度训练。",
},
},
"cutoff_len": {
"en": {
"label": "Cutoff length",
"info": "Max tokens in input sequence.",
},
"ru": {
"label": "Длина обрезки",
"info": "Максимальное количество токенов во входной последовательности.",
},
"zh": {
"label": "截断长度",
"info": "输入序列分词后的最大长度。",
},
},
"batch_size": {
"en": {
"label": "Batch size",
"info": "Number of samples processed on each GPU.",
},
"ru": {
"label": "Размер пакета",
"info": "Количество образцов для обработки на каждом GPU.",
},
"zh": {
"label": "批处理大小",
"info": "每个 GPU 处理的样本数量。",
},
},
"gradient_accumulation_steps": {
"en": {
"label": "Gradient accumulation",
"info": "Number of steps for gradient accumulation.",
},
"ru": {
"label": "Накопление градиента",
"info": "Количество шагов накопления градиента.",
},
"zh": {
"label": "梯度累积",
"info": "梯度累积的步数。",
},
},
"val_size": {
"en": {
"label": "Val size",
"info": "Proportion of data in the dev set.",
},
"ru": {
"label": "Размер валидации",
"info": "Пропорция данных в наборе для разработки.",
},
"zh": {
"label": "验证集比例",
"info": "验证集占全部样本的百分比。",
},
},
"lr_scheduler_type": {
"en": {
"label": "LR scheduler",
"info": "Name of the learning rate scheduler.",
},
"ru": {
"label": "Планировщик скорости обучения",
"info": "Название планировщика скорости обучения.",
},
"zh": {
"label": "学习率调节器",
"info": "学习率调度器的名称。",
},
},
"extra_tab": {
"en": {
"label": "Extra configurations",
},
"ru": {
"label": "Дополнительные конфигурации",
},
"zh": {
"label": "其它参数设置",
},
},
"logging_steps": {
"en": {
"label": "Logging steps",
"info": "Number of steps between two logs.",
},
"ru": {
"label": "Шаги логирования",
"info": "Количество шагов между двумя записями в журнале.",
},
"zh": {
"label": "日志间隔",
"info": "每两次日志输出间的更新步数。",
},
},
"save_steps": {
"en": {
"label": "Save steps",
"info": "Number of steps between two checkpoints.",
},
"ru": {
"label": "Шаги сохранения",
"info": "Количество шагов между двумя контрольными точками.",
},
"zh": {
"label": "保存间隔",
"info": "每两次断点保存间的更新步数。",
},
},
"warmup_steps": {
"en": {
"label": "Warmup steps",
"info": "Number of steps used for warmup.",
},
"ru": {
"label": "Шаги прогрева",
"info": "Количество шагов, используемых для прогрева.",
},
"zh": {
"label": "预热步数",
"info": "学习率预热采用的步数。",
},
},
"neftune_alpha": {
"en": {
"label": "NEFTune Alpha",
"info": "Magnitude of noise adding to embedding vectors.",
},
"ru": {
"label": "NEFTune Alpha",
"info": "Величина шума, добавляемого к векторам вложений.",
},
"zh": {
"label": "NEFTune 噪声参数",
"info": "嵌入向量所添加的噪声大小。",
},
},
"optim": {
"en": {
"label": "Optimizer",
"info": "The optimizer to use: adamw_torch, adamw_8bit or adafactor.",
},
"ru": {
"label": "Оптимизатор",
"info": "Оптимизатор для использования: adamw_torch, adamw_8bit или adafactor.",
},
"zh": {
"label": "优化器",
"info": "使用的优化器:adamw_torch、adamw_8bit 或 adafactor。",
},
},
"resize_vocab": {
"en": {
"label": "Resize token embeddings",
"info": "Resize the tokenizer vocab and the embedding layers.",
},
"ru": {
"label": "Изменение размера токенных эмбеддингов",
"info": "Изменить размер словаря токенизатора и слоев эмбеддинга.",
},
"zh": {
"label": "更改词表大小",
"info": "更改分词器词表和嵌入层的大小。",
},
},
"packing": {
"en": {
"label": "Pack sequences",
"info": "Pack sequences into samples of fixed length.",
},
"ru": {
"label": "Упаковка последовательностей",
"info": "Упаковка последовательностей в образцы фиксированной длины.",
},
"zh": {
"label": "序列打包",
"info": "将序列打包为等长样本。",
},
},
"upcast_layernorm": {
"en": {
"label": "Upcast LayerNorm",
"info": "Upcast weights of layernorm in float32.",
},
"ru": {
"label": "Приведение весов LayerNorm",
"info": "Приведение весов LayerNorm к float32.",
},
"zh": {
"label": "缩放归一化层",
"info": "将归一化层权重缩放至 32 位精度。",
},
},
"use_llama_pro": {
"en": {
"label": "Enable LLaMA Pro",
"info": "Make the parameters in the expanded blocks trainable.",
},
"ru": {
"label": "Включить LLaMA Pro",
"info": "Сделать параметры в расширенных блоках обучаемыми.",
},
"zh": {
"label": "使用 LLaMA Pro",
"info": "仅训练块扩展后的参数。",
},
},
"shift_attn": {
"en": {
"label": "Enable S^2 Attention",
"info": "Use shift short attention proposed by LongLoRA.",
},
"ru": {
"label": "Включить S^2 внимание",
"info": "Использовать сдвиг внимания на короткие дистанции предложенный LongLoRA.",
},
"zh": {
"label": "使用 S^2 Attention",
"info": "使用 LongLoRA 提出的 shift short attention。",
},
},
"report_to": {
"en": {
"label": "Enable external logger",
"info": "Use TensorBoard or wandb to log experiment.",
},
"ru": {
"label": "Включить внешний регистратор",
"info": "Использовать TensorBoard или wandb для ведения журнала экспериментов.",
},
"zh": {
"label": "启用外部记录面板",
"info": "使用 TensorBoard 或 wandb 记录实验。",
},
},
"freeze_tab": {
"en": {
"label": "Freeze tuning configurations",
},
"ru": {
"label": "конфигурации для настройки заморозки",
},
"zh": {
"label": "部分参数微调设置",
},
},
"freeze_trainable_layers": {
"en": {
"label": "Trainable layers",
"info": "Number of the last(+)/first(-) hidden layers to be set as trainable.",
},
"ru": {
"label": "Обучаемые слои",
"info": "Количество последних (+)/первых (-) скрытых слоев, которые будут установлены как обучаемые.",
},
"zh": {
"label": "可训练层数",
"info": "最末尾(+)/最前端(-)可训练隐藏层的数量。",
},
},
"freeze_trainable_modules": {
"en": {
"label": "Trainable modules",
"info": "Name(s) of trainable modules. Use commas to separate multiple modules.",
},
"ru": {
"label": "Обучаемые модули",
"info": "Название обучаемых модулей. Используйте запятые для разделения нескольких модулей.",
},
"zh": {
"label": "可训练模块",
"info": "可训练模块的名称。使用英文逗号分隔多个名称。",
},
},
"freeze_extra_modules": {
"en": {
"label": "Extra modules (optional)",
"info": (
"Name(s) of modules apart from hidden layers to be set as trainable. "
"Use commas to separate multiple modules."
),
},
"ru": {
"label": "Дополнительные модули (опционально)",
"info": (
"Имена модулей, кроме скрытых слоев, которые следует установить в качестве обучаемых. "
"Используйте запятые для разделения нескольких модулей."
),
},
"zh": {
"label": "额外模块(非必填)",
"info": "除隐藏层以外的可训练模块名称。使用英文逗号分隔多个名称。",
},
},
"lora_tab": {
"en": {
"label": "LoRA configurations",
},
"ru": {
"label": "Конфигурации LoRA",
},
"zh": {
"label": "LoRA 参数设置",
},
},
"lora_rank": {
"en": {
"label": "LoRA rank",
"info": "The rank of LoRA matrices.",
},
"ru": {
"label": "Ранг матриц LoRA",
"info": "Ранг матриц LoRA.",
},
"zh": {
"label": "LoRA 秩",
"info": "LoRA 矩阵的秩大小。",
},
},
"lora_alpha": {
"en": {
"label": "LoRA alpha",
"info": "Lora scaling coefficient.",
},
"ru": {
"label": "LoRA alpha",
"info": "Коэффициент масштабирования LoRA.",
},
"zh": {
"label": "LoRA 缩放系数",
"info": "LoRA 缩放系数大小。",
},
},
"lora_dropout": {
"en": {
"label": "LoRA dropout",
"info": "Dropout ratio of LoRA weights.",
},
"ru": {
"label": "Вероятность отсева LoRA",
"info": "Вероятность отсева весов LoRA.",
},
"zh": {
"label": "LoRA 随机丢弃",
"info": "LoRA 权重随机丢弃的概率。",
},
},
"loraplus_lr_ratio": {
"en": {
"label": "LoRA+ LR ratio",
"info": "The LR ratio of the B matrices in LoRA.",
},
"ru": {
"label": "LoRA+ LR коэффициент",
"info": "Коэффициент LR матриц B в LoRA.",
},
"zh": {
"label": "LoRA+ 学习率比例",
"info": "LoRA+ 中 B 矩阵的学习率倍数。",
},
},
"create_new_adapter": {
"en": {
"label": "Create new adapter",
"info": "Create a new adapter with randomly initialized weight upon the existing one.",
},
"ru": {
"label": "Создать новый адаптер",
"info": "Создать новый адаптер с случайной инициализацией веса на основе существующего.",
},
"zh": {
"label": "新建适配器",
"info": "在现有的适配器上创建一个随机初始化后的新适配器。",
},
},
"use_rslora": {
"en": {
"label": "Use rslora",
"info": "Use the rank stabilization scaling factor for LoRA layer.",
},
"ru": {
"label": "Использовать rslora",
"info": "Использовать коэффициент масштабирования стабилизации ранга для слоя LoRA.",
},
"zh": {
"label": "使用 rslora",
"info": "对 LoRA 层使用秩稳定缩放方法。",
},
},
"use_dora": {
"en": {
"label": "Use DoRA",
"info": "Use weight-decomposed LoRA.",
},
"ru": {
"label": "Используйте DoRA",
"info": "Используйте LoRA с декомпозицией весов.",
},
"zh": {
"label": "使用 DoRA",
"info": "使用权重分解的 LoRA。",
},
},
"use_pissa": {
"en": {
"label": "Use PiSSA",
"info": "Use PiSSA method.",
},
"ru": {
"label": "используйте PiSSA",
"info": "Используйте метод PiSSA.",
},
"zh": {
"label": "使用 PiSSA",
"info": "使用 PiSSA 方法。",
},
},
"lora_target": {
"en": {
"label": "LoRA modules (optional)",
"info": "Name(s) of modules to apply LoRA. Use commas to separate multiple modules.",
},
"ru": {
"label": "Модули LoRA (опционально)",
"info": "Имена модулей для применения LoRA. Используйте запятые для разделения нескольких модулей.",
},
"zh": {
"label": "LoRA 作用模块(非必填)",
"info": "应用 LoRA 的模块名称。使用英文逗号分隔多个名称。",
},
},
"additional_target": {
"en": {
"label": "Additional modules (optional)",
"info": (
"Name(s) of modules apart from LoRA layers to be set as trainable. "
"Use commas to separate multiple modules."
),
},
"ru": {
"label": "Дополнительные модули (опционально)",
"info": (
"Имена модулей, кроме слоев LoRA, которые следует установить в качестве обучаемых. "
"Используйте запятые для разделения нескольких модулей."
),
},
"zh": {
"label": "附加模块(非必填)",
"info": "除 LoRA 层以外的可训练模块名称。使用英文逗号分隔多个名称。",
},
},
"rlhf_tab": {
"en": {
"label": "RLHF configurations",
},
"ru": {
"label": "Конфигурации RLHF",
},
"zh": {
"label": "RLHF 参数设置",
},
},
"pref_beta": {
"en": {
"label": "Beta value",
"info": "Value of the beta parameter in the loss.",
},
"ru": {
"label": "Бета значение",
"info": "Значение параметра бета в функции потерь.",
},
"zh": {
"label": "Beta 参数",
"info": "损失函数中 beta 超参数大小。",
},
},
"pref_ftx": {
"en": {
"label": "Ftx gamma",
"info": "The weight of SFT loss in the final loss.",
},
"ru": {
"label": "Ftx гамма",
"info": "Вес потери SFT в итоговой потере.",
},
"zh": {
"label": "Ftx gamma",
"info": "损失函数中 SFT 损失的权重大小。",
},
},
"pref_loss": {
"en": {
"label": "Loss type",
"info": "The type of the loss function.",
},
"ru": {
"label": "Тип потерь",
"info": "Тип функции потерь.",
},
"zh": {
"label": "损失类型",
"info": "损失函数的类型。",
},
},
"reward_model": {
"en": {
"label": "Reward model",
"info": "Adapter of the reward model in PPO training.",
},
"ru": {
"label": "Модель вознаграждения",
"info": "Адаптер модели вознаграждения для обучения PPO.",
},
"zh": {
"label": "奖励模型",
"info": "PPO 训练中奖励模型的适配器路径。",
},
},
"ppo_score_norm": {
"en": {
"label": "Score norm",
"info": "Normalizing scores in PPO training.",
},
"ru": {
"label": "Норма оценок",
"info": "Нормализация оценок в тренировке PPO.",
},
"zh": {
"label": "奖励模型",
"info": "PPO 训练中归一化奖励分数。",
},
},
"ppo_whiten_rewards": {
"en": {
"label": "Whiten rewards",
"info": "Whiten the rewards in PPO training.",
},
"ru": {
"label": "Белые вознаграждения",
"info": "Осветлите вознаграждения в обучении PPO.",
},
"zh": {
"label": "白化奖励",
"info": "PPO 训练中将奖励分数做白化处理。",
},
},
"galore_tab": {
"en": {
"label": "GaLore configurations",
},
"ru": {
"label": "Конфигурации GaLore",
},
"zh": {
"label": "GaLore 参数设置",
},
},
"use_galore": {
"en": {
"label": "Use GaLore",
"info": "Enable gradient low-Rank projection.",
},
"ru": {
"label": "Использовать GaLore",
"info": "Включить проекцию градиента на низкоранговое пространство.",
},
"zh": {
"label": "使用 GaLore",
"info": "使用梯度低秩投影。",
},
},
"galore_rank": {
"en": {
"label": "GaLore rank",
"info": "The rank of GaLore gradients.",
},
"ru": {
"label": "Ранг GaLore",
"info": "Ранг градиентов GaLore.",
},
"zh": {
"label": "GaLore 秩",
"info": "GaLore 梯度的秩大小。",
},
},
"galore_update_interval": {
"en": {
"label": "Update interval",
"info": "Number of steps to update the GaLore projection.",
},
"ru": {
"label": "Интервал обновления",
"info": "Количество шагов для обновления проекции GaLore.",
},
"zh": {
"label": "更新间隔",
"info": "相邻两次投影更新的步数。",
},
},
"galore_scale": {
"en": {
"label": "GaLore scale",
"info": "GaLore scaling coefficient.",
},
"ru": {
"label": "LoRA Alpha",
"info": "Коэффициент масштабирования GaLore.",
},
"zh": {
"label": "GaLore 缩放系数",
"info": "GaLore 缩放系数大小。",
},
},
"galore_target": {
"en": {
"label": "GaLore modules",
"info": "Name(s) of modules to apply GaLore. Use commas to separate multiple modules.",
},
"ru": {
"label": "Модули GaLore",
"info": "Имена модулей для применения GaLore. Используйте запятые для разделения нескольких модулей.",
},
"zh": {
"label": "GaLore 作用模块",
"info": "应用 GaLore 的模块名称。使用英文逗号分隔多个名称。",
},
},
"badam_tab": {
"en": {
"label": "BAdam configurations",
},
"ru": {
"label": "Конфигурации BAdam",
},
"zh": {
"label": "BAdam 参数设置",
},
},
"use_badam": {
"en": {
"label": "Use BAdam",
"info": "Enable the BAdam optimizer.",
},
"ru": {
"label": "Использовать BAdam",
"info": "Включите оптимизатор BAdam.",
},
"zh": {
"label": "使用 BAdam",
"info": "使用 BAdam 优化器。",
},
},
"badam_mode": {
"en": {
"label": "BAdam mode",
"info": "Whether to use layer-wise or ratio-wise BAdam optimizer.",
},
"ru": {
"label": "Режим BAdam",
"info": "Использовать ли оптимизатор BAdam с послоевой или пропорциональной настройкой.",
},
"zh": {
"label": "BAdam 模式",
"info": "使用 layer-wise 或 ratio-wise BAdam 优化器。",
},
},
"badam_switch_mode": {
"en": {
"label": "Switch mode",
"info": "The strategy of picking block to update for layer-wise BAdam.",
},
"ru": {
"label": "Режим переключения",
"info": "Стратегия выбора блока для обновления для послойного BAdam.",
},
"zh": {
"label": "切换策略",
"info": "Layer-wise BAdam 优化器的块切换策略。",
},
},
"badam_switch_interval": {
"en": {
"label": "Switch interval",
"info": "Number of steps to update the block for layer-wise BAdam.",
},
"ru": {
"label": "Интервал переключения",
"info": "количество шагов для обновления блока для пошагового BAdam.",
},
"zh": {
"label": "切换频率",
"info": "Layer-wise BAdam 优化器的块切换频率。",
},
},
"badam_update_ratio": {
"en": {
"label": "Update ratio",
"info": "The ratio of the update for ratio-wise BAdam.",
},
"ru": {
"label": "Коэффициент обновления",
"info": "Коэффициент обновления для BAdam с учётом соотношений.",
},
"zh": {
"label": "Block 更新比例",
"info": "Ratio-wise BAdam 优化器的更新比例。",
},
},
"cmd_preview_btn": {
"en": {
"value": "Preview command",
},
"ru": {
"value": "Просмотр команды",
},
"zh": {
"value": "预览命令",
},
},
"arg_save_btn": {
"en": {
"value": "Save arguments",
},
"ru": {
"value": "Сохранить аргументы",
},
"zh": {
"value": "保存训练参数",
},
},
"arg_load_btn": {
"en": {
"value": "Load arguments",
},
"ru": {
"value": "Загрузить аргументы",
},
"zh": {
"value": "载入训练参数",
},
},
"start_btn": {
"en": {
"value": "Start",
},
"ru": {
"value": "Начать",
},
"zh": {
"value": "开始",
},
},
"stop_btn": {
"en": {
"value": "Abort",
},
"ru": {
"value": "Прервать",
},
"zh": {
"value": "中断",
},
},
"output_dir": {
"en": {
"label": "Output dir",
"info": "Directory for saving results.",
},
"ru": {
"label": "Выходной каталог",
"info": "Каталог для сохранения результатов.",
},
"zh": {
"label": "输出目录",
"info": "保存结果的路径。",
},
},
"config_path": {
"en": {
"label": "Config path",
"info": "Path to config saving arguments.",
},
"ru": {
"label": "Путь к конфигурации",
"info": "Путь для сохранения аргументов конфигурации.",
},
"zh": {
"label": "配置路径",
"info": "保存训练参数的配置文件路径。",
},
},
"device_count": {
"en": {
"label": "Device count",
"info": "Number of devices available.",
},
"ru": {
"label": "Количество устройств",
"info": "Количество доступных устройств.",
},
"zh": {
"label": "设备数量",
"info": "当前可用的运算设备数。",
},
},
"ds_stage": {
"en": {
"label": "DeepSpeed stage",
"info": "DeepSpeed stage for distributed training.",
},
"ru": {
"label": "Этап DeepSpeed",
"info": "Этап DeepSpeed для распределенного обучения.",
},
"zh": {
"label": "DeepSpeed stage",
"info": "多卡训练的 DeepSpeed stage。",
},
},
"ds_offload": {
"en": {
"label": "Enable offload",
"info": "Enable DeepSpeed offload (slow down training).",
},
"ru": {
"label": "Включить выгрузку",
"info": "включить выгрузку DeepSpeed (замедлит обучение).",
},
"zh": {
"label": "使用 offload",
"info": "使用 DeepSpeed offload(会减慢速度)。",
},
},
"output_box": {
"en": {
"value": "Ready.",
},
"ru": {
"value": "Готово.",
},
"zh": {
"value": "准备就绪。",
},
},
"loss_viewer": {
"en": {
"label": "Loss",
},
"ru": {
"label": "Потери",
},
"zh": {
"label": "损失",
},
},
"predict": {
"en": {
"label": "Save predictions",
},
"ru": {
"label": "Сохранить предсказания",
},
"zh": {
"label": "保存预测结果",
},
},
"infer_backend": {
"en": {
"label": "Inference engine",
},
"ru": {
"label": "Инференс движок",
},
"zh": {
"label": "推理引擎",
},
},
"infer_dtype": {
"en": {
"label": "Inference data type",
},
"ru": {
"label": "Тип данных для вывода",
},
"zh": {
"label": "推理数据类型",
},
},
"load_btn": {
"en": {
"value": "Load model",
},
"ru": {
"value": "Загрузить модель",
},
"zh": {
"value": "加载模型",
},
},
"unload_btn": {
"en": {
"value": "Unload model",
},
"ru": {
"value": "Выгрузить модель",
},
"zh": {
"value": "卸载模型",
},
},
"info_box": {
"en": {
"value": "Model unloaded, please load a model first.",
},
"ru": {
"value": "Модель не загружена, загрузите модель сначала.",
},
"zh": {
"value": "模型未加载,请先加载模型。",
},
},
"role": {
"en": {
"label": "Role",
},
"ru": {
"label": "Роль",
},
"zh": {
"label": "角色",
},
},
"system": {
"en": {
"placeholder": "System prompt (optional)",
},
"ru": {
"placeholder": "Системный запрос (по желанию)",
},
"zh": {
"placeholder": "系统提示词(非必填)",
},
},
"tools": {
"en": {
"placeholder": "Tools (optional)",
},
"ru": {
"placeholder": "Инструменты (по желанию)",
},
"zh": {
"placeholder": "工具列表(非必填)",
},
},
"image": {
"en": {
"label": "Image (optional)",
},
"ru": {
"label": "Изображение (по желанию)",
},
"zh": {
"label": "图像(非必填)",
},
},
"query": {
"en": {
"placeholder": "Input...",
},
"ru": {
"placeholder": "Ввод...",
},
"zh": {
"placeholder": "输入...",
},
},
"submit_btn": {
"en": {
"value": "Submit",
},
"ru": {
"value": "Отправить",
},
"zh": {
"value": "提交",
},
},
"max_length": {
"en": {
"label": "Maximum length",
},
"ru": {
"label": "Максимальная длина",
},
"zh": {
"label": "最大长度",
},
},
"max_new_tokens": {
"en": {
"label": "Maximum new tokens",
},
"ru": {
"label": "Максимальное количество новых токенов",
},
"zh": {
"label": "最大生成长度",
},
},
"top_p": {
"en": {
"label": "Top-p",
},
"ru": {
"label": "Лучшие-p",
},
"zh": {
"label": "Top-p 采样值",
},
},
"temperature": {
"en": {
"label": "Temperature",
},
"ru": {
"label": "Температура",
},
"zh": {
"label": "温度系数",
},
},
"clear_btn": {
"en": {
"value": "Clear history",
},
"ru": {
"value": "Очистить историю",
},
"zh": {
"value": "清空历史",
},
},
"export_size": {
"en": {
"label": "Max shard size (GB)",
"info": "The maximum size for a model file.",
},
"ru": {
"label": "Максимальный размер фрагмента (ГБ)",
"info": "Максимальный размер файла модели.",
},
"zh": {
"label": "最大分块大小(GB)",
"info": "单个模型文件的最大大小。",
},
},
"export_quantization_bit": {
"en": {
"label": "Export quantization bit.",
"info": "Quantizing the exported model.",
},
"ru": {
"label": "Экспорт бита квантования",
"info": "Квантование экспортируемой модели.",
},
"zh": {
"label": "导出量化等级",
"info": "量化导出模型。",
},
},
"export_quantization_dataset": {
"en": {
"label": "Export quantization dataset",
"info": "The calibration dataset used for quantization.",
},
"ru": {
"label": "Экспорт набора данных для квантования",
"info": "Набор данных калибровки, используемый для квантования.",
},
"zh": {
"label": "导出量化数据集",
"info": "量化过程中使用的校准数据集。",
},
},
"export_device": {
"en": {
"label": "Export device",
"info": "Which device should be used to export model.",
},
"ru": {
"label": "Экспорт устройство",
"info": "Какое устройство следует использовать для экспорта модели.",
},
"zh": {
"label": "导出设备",
"info": "导出模型使用的设备类型。",
},
},
"export_legacy_format": {
"en": {
"label": "Export legacy format",
"info": "Do not use safetensors to save the model.",
},
"ru": {
"label": "Экспорт в устаревший формат",
"info": "Не использовать safetensors для сохранения модели.",
},
"zh": {
"label": "导出旧格式",
"info": "不使用 safetensors 格式保存模型。",
},
},
"export_dir": {
"en": {
"label": "Export dir",
"info": "Directory to save exported model.",
},
"ru": {
"label": "Каталог экспорта",
"info": "Каталог для сохранения экспортированной модели.",
},
"zh": {
"label": "导出目录",
"info": "保存导出模型的文件夹路径。",
},
},
"export_hub_model_id": {
"en": {
"label": "HF Hub ID (optional)",
"info": "Repo ID for uploading model to Hugging Face hub.",
},
"ru": {
"label": "HF Hub ID (опционально)",
"info": "Идентификатор репозитория для загрузки модели на Hugging Face hub.",
},
"zh": {
"label": "HF Hub ID(非必填)",
"info": "用于将模型上传至 Hugging Face Hub 的仓库 ID。",
},
},
"export_btn": {
"en": {
"value": "Export",
},
"ru": {
"value": "Экспорт",
},
"zh": {
"value": "开始导出",
},
},
}
ALERTS = {
"err_conflict": {
"en": "A process is in running, please abort it first.",
"ru": "Процесс уже запущен, пожалуйста, сначала прервите его.",
"zh": "任务已存在,请先中断训练。",
},
"err_exists": {
"en": "You have loaded a model, please unload it first.",
"ru": "Вы загрузили модель, сначала разгрузите ее.",
"zh": "模型已存在,请先卸载模型。",
},
"err_no_model": {
"en": "Please select a model.",
"ru": "Пожалуйста, выберите модель.",
"zh": "请选择模型。",
},
"err_no_path": {
"en": "Model not found.",
"ru": "Модель не найдена.",
"zh": "模型未找到。",
},
"err_no_dataset": {
"en": "Please choose a dataset.",
"ru": "Пожалуйста, выберите набор данных.",
"zh": "请选择数据集。",
},
"err_no_adapter": {
"en": "Please select an adapter.",
"ru": "Пожалуйста, выберите адаптер.",
"zh": "请选择适配器。",
},
"err_no_output_dir": {
"en": "Please provide output dir.",
"ru": "Пожалуйста, укажите выходную директорию.",
"zh": "请填写输出目录。",
},
"err_no_reward_model": {
"en": "Please select a reward model.",
"ru": "Пожалуйста, выберите модель вознаграждения.",
"zh": "请选择奖励模型。",
},
"err_no_export_dir": {
"en": "Please provide export dir.",
"ru": "Пожалуйста, укажите каталог для экспорта.",
"zh": "请填写导出目录。",
},
"err_gptq_lora": {
"en": "Please merge adapters before quantizing the model.",
"ru": "Пожалуйста, объедините адаптеры перед квантованием модели.",
"zh": "量化模型前请先合并适配器。",
},
"err_failed": {
"en": "Failed.",
"ru": "Ошибка.",
"zh": "训练出错。",
},
"err_demo": {
"en": "Training is unavailable in demo mode, duplicate the space to a private one first.",
"ru": "Обучение недоступно в демонстрационном режиме, сначала скопируйте пространство в частное.",
"zh": "展示模式不支持训练,请先复制到私人空间。",
},
"err_tool_name": {
"en": "Tool name not found.",
"ru": "Имя инструмента не найдено.",
"zh": "工具名称未找到。",
},
"err_json_schema": {
"en": "Invalid JSON schema.",
"ru": "Неверная схема JSON.",
"zh": "Json 格式错误。",
},
"err_config_not_found": {
"en": "Config file is not found.",
"ru": "Файл конфигурации не найден.",
"zh": "未找到配置文件。",
},
"warn_no_cuda": {
"en": "CUDA environment was not detected.",
"ru": "Среда CUDA не обнаружена.",
"zh": "未检测到 CUDA 环境。",
},
"warn_output_dir_exists": {
"en": "Output dir already exists, will resume training from here.",
"ru": "Выходной каталог уже существует, обучение будет продолжено отсюда.",
"zh": "输出目录已存在,将从该断点恢复训练。",
},
"info_aborting": {
"en": "Aborted, wait for terminating...",
"ru": "Прервано, ожидание завершения...",
"zh": "训练中断,正在等待进程结束……",
},
"info_aborted": {
"en": "Ready.",
"ru": "Готово.",
"zh": "准备就绪。",
},
"info_finished": {
"en": "Finished.",
"ru": "Завершено.",
"zh": "训练完毕。",
},
"info_config_saved": {
"en": "Arguments have been saved at: ",
"ru": "Аргументы были сохранены по адресу: ",
"zh": "训练参数已保存至:",
},
"info_config_loaded": {
"en": "Arguments have been restored.",
"ru": "Аргументы были восстановлены.",
"zh": "训练参数已载入。",
},
"info_loading": {
"en": "Loading model...",
"ru": "Загрузка модели...",
"zh": "加载中……",
},
"info_unloading": {
"en": "Unloading model...",
"ru": "Выгрузка модели...",
"zh": "卸载中……",
},
"info_loaded": {
"en": "Model loaded, now you can chat with your model!",
"ru": "Модель загружена, теперь вы можете общаться с вашей моделью!",
"zh": "模型已加载,可以开始聊天了!",
},
"info_unloaded": {
"en": "Model unloaded.",
"ru": "Модель выгружена.",
"zh": "模型已卸载。",
},
"info_exporting": {
"en": "Exporting model...",
"ru": "Экспорт модели...",
"zh": "正在导出模型……",
},
"info_exported": {
"en": "Model exported.",
"ru": "Модель экспортирована.",
"zh": "模型导出完成。",
},
}
| LLaMA-Factory/src/llamafactory/webui/locales.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/webui/locales.py",
"repo_id": "LLaMA-Factory",
"token_count": 31802
} | 5 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from peft import LoraModel, PeftModel
from transformers import AutoModelForCausalLM
from llamafactory.extras.misc import get_current_device
from llamafactory.hparams import get_infer_args, get_train_args
from llamafactory.model import load_model, load_tokenizer
TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
TINY_LLAMA_PISSA = os.environ.get("TINY_LLAMA_ADAPTER", "llamafactory/tiny-random-Llama-3-pissa")
TRAIN_ARGS = {
"model_name_or_path": TINY_LLAMA,
"stage": "sft",
"do_train": True,
"finetuning_type": "lora",
"pissa_init": True,
"pissa_iter": -1,
"dataset": "llamafactory/tiny-supervised-dataset",
"dataset_dir": "ONLINE",
"template": "llama3",
"cutoff_len": 1024,
"overwrite_cache": True,
"output_dir": "dummy_dir",
"overwrite_output_dir": True,
"fp16": True,
}
INFER_ARGS = {
"model_name_or_path": TINY_LLAMA_PISSA,
"adapter_name_or_path": TINY_LLAMA_PISSA,
"adapter_folder": "pissa_init",
"finetuning_type": "lora",
"template": "llama3",
"infer_dtype": "float16",
}
def compare_model(model_a: "torch.nn.Module", model_b: "torch.nn.Module"):
state_dict_a = model_a.state_dict()
state_dict_b = model_b.state_dict()
assert set(state_dict_a.keys()) == set(state_dict_b.keys())
for name in state_dict_a.keys():
assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5)
def test_pissa_init():
model_args, _, _, finetuning_args, _ = get_train_args(TRAIN_ARGS)
tokenizer_module = load_tokenizer(model_args)
model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
base_model = AutoModelForCausalLM.from_pretrained(
TINY_LLAMA_PISSA, torch_dtype=torch.float16, device_map=get_current_device()
)
ref_model = PeftModel.from_pretrained(base_model, TINY_LLAMA_PISSA, subfolder="pissa_init", is_trainable=True)
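    # Cast the trainable adapter weights to float32 so compare_model checks both models in the same precision.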
for param in filter(lambda p: p.requires_grad, ref_model.parameters()):
param.data = param.data.to(torch.float32)
compare_model(model, ref_model)
def test_pissa_inference():
model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
tokenizer_module = load_tokenizer(model_args)
model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=False)
base_model = AutoModelForCausalLM.from_pretrained(
TINY_LLAMA_PISSA, torch_dtype=torch.float16, device_map=get_current_device()
)
ref_model: "LoraModel" = PeftModel.from_pretrained(base_model, TINY_LLAMA_PISSA, subfolder="pissa_init")
ref_model = ref_model.merge_and_unload()
compare_model(model, ref_model)
| LLaMA-Factory/tests/model/test_pissa.py/0 | {
"file_path": "LLaMA-Factory/tests/model/test_pissa.py",
"repo_id": "LLaMA-Factory",
"token_count": 1322
} | 6 |
<component name="ProjectDictionaryState">
<dictionary name="ubuntu">
<words>
<w>tablename</w>
</words>
</dictionary>
</component> | PaddleDetection/.idea/dictionaries/ubuntu.xml/0 | {
"file_path": "PaddleDetection/.idea/dictionaries/ubuntu.xml",
"repo_id": "PaddleDetection",
"token_count": 57
} | 7 |
_BASE_: [
'../datasets/coco_instance.yml',
'../runtime.yml',
'_base_/optimizer_1x.yml',
'_base_/cascade_mask_rcnn_r50_fpn.yml',
'_base_/cascade_mask_fpn_reader.yml',
]
weights: output/cascade_mask_rcnn_r50_fpn_1x_coco/model_final
| PaddleDetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 122
} | 8 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'_base_/optimizer_140e.yml',
'_base_/centernet_r50.yml',
'_base_/centernet_reader.yml',
]
weights: output/centernet_r50_140e_coco/model_final
| PaddleDetection/configs/centernet/centernet_r50_140e_coco.yml/0 | {
"file_path": "PaddleDetection/configs/centernet/centernet_r50_140e_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 106
} | 9 |
metric: MCMOT
num_classes: 10
# using VisDrone2019 MOT dataset with 10 classes as default, you can modify it for your needs.
# for MCMOT training
TrainDataset:
!MCMOTDataSet
dataset_dir: dataset/mot
image_lists: ['visdrone_mcmot.train']
data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']
label_list: label_list.txt
# for MCMOT evaluation
# If you want to change the MCMOT evaluation dataset, please modify 'data_root'
EvalMOTDataset:
!MOTImageFolder
dataset_dir: dataset/mot
data_root: visdrone_mcmot/images/val
    keep_ori_im: False # set True if saving visualization images or video, or when used in DeepSORT
# for MCMOT video inference
TestMOTDataset:
!MOTImageFolder
dataset_dir: dataset/mot
    keep_ori_im: True # set True if saving visualization images or video
| PaddleDetection/configs/datasets/mcmot.yml/0 | {
"file_path": "PaddleDetection/configs/datasets/mcmot.yml",
"repo_id": "PaddleDetection",
"token_count": 285
} | 10 |
# DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection
## Introduction
[DINO](https://arxiv.org/abs/2203.03605) is an object detection model based on DETR. We reproduce the model described in the paper.
## Model Zoo
| Backbone | Model | Epochs | Box AP | Config | Log | Download |
|:------:|:---------------:|:------:|:------:|:---------------------------------------:|:-------------------------------------------------------------------------------:|:--------------------------------------------------------------------------------:|
| R-50 | dino_r50_4scale | 12 | 49.5 | [config](./dino_r50_4scale_1x_coco.yml) | [log](https://bj.bcebos.com/v1/paddledet/logs/dino_r50_4scale_1x_coco_49.5.log) | [model](https://paddledet.bj.bcebos.com/models/dino_r50_4scale_1x_coco.pdparams) |
| R-50 | dino_r50_4scale | 24 | 50.8 | [config](./dino_r50_4scale_2x_coco.yml) | [log](https://bj.bcebos.com/v1/paddledet/logs/dino_r50_4scale_2x_coco_50.8.log) | [model](https://paddledet.bj.bcebos.com/models/dino_r50_4scale_2x_coco.pdparams) |
**Notes:**
- DINO is trained on the COCO train2017 dataset and evaluated on val2017; the reported metric is `mAP(IoU=0.5:0.95)`.
- DINO is trained with 4 GPUs.
GPU multi-card training:
```bash
python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/dino/dino_r50_4scale_1x_coco.yml --fleet --eval
```
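For completeness, evaluation of the released weights can be run through the standard PaddleDetection entry point; the following is a minimal sketch, assuming the weights URL from the Model Zoo table above:
```bash
# Single-GPU evaluation on COCO val2017 (sketch; weights URL taken from the table above)
CUDA_VISIBLE_DEVICES=0 python tools/eval.py -c configs/dino/dino_r50_4scale_1x_coco.yml \
    -o weights=https://paddledet.bj.bcebos.com/models/dino_r50_4scale_1x_coco.pdparams
```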
## Custom Operator
- The multi-scale deformable attention custom operator is provided [here](../../ppdet/modeling/transformers/ext_op).
## Citations
```
@misc{zhang2022dino,
title={DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection},
author={Hao Zhang and Feng Li and Shilong Liu and Lei Zhang and Hang Su and Jun Zhu and Lionel M. Ni and Heung-Yeung Shum},
year={2022},
eprint={2203.03605},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
| PaddleDetection/configs/dino/README.md/0 | {
"file_path": "PaddleDetection/configs/dino/README.md",
"repo_id": "PaddleDetection",
"token_count": 894
} | 11 |
_BASE_: [
'../datasets/wider_face.yml',
'../runtime.yml',
'_base_/optimizer_1000e.yml',
'_base_/blazeface.yml',
'_base_/face_reader.yml',
]
weights: output/blazeface_1000e/model_final
multi_scale_eval: True
| PaddleDetection/configs/face_detection/blazeface_1000e.yml/0 | {
"file_path": "PaddleDetection/configs/face_detection/blazeface_1000e.yml",
"repo_id": "PaddleDetection",
"token_count": 103
} | 12 |
architecture: FCOS
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
FCOS:
backbone: ResNet
neck: FPN
fcos_head: FCOSHead
ResNet:
depth: 50
variant: 'b'
norm_type: bn
freeze_at: 0 # res2
return_idx: [1, 2, 3]
num_stages: 4
FPN:
out_channel: 256
spatial_scales: [0.125, 0.0625, 0.03125]
extra_stage: 2
has_extra_convs: True
use_c5: False
FCOSHead:
fcos_feat:
name: FCOSFeat
feat_in: 256
feat_out: 256
num_convs: 4
norm_type: "gn"
use_dcn: False
fpn_stride: [8, 16, 32, 64, 128]
prior_prob: 0.01
norm_reg_targets: True
centerness_on_reg: True
num_shift: 0.5
fcos_loss:
name: FCOSLoss
loss_alpha: 0.25
loss_gamma: 2.0
iou_loss_type: "giou"
reg_weights: 1.0
nms:
name: MultiClassNMS
nms_top_k: 1000
keep_top_k: 100
score_threshold: 0.025
nms_threshold: 0.6
| PaddleDetection/configs/fcos/_base_/fcos_r50_fpn.yml/0 | {
"file_path": "PaddleDetection/configs/fcos/_base_/fcos_r50_fpn.yml",
"repo_id": "PaddleDetection",
"token_count": 447
} | 13 |
# Group Normalization
## Model Zoo
| 骨架网络 | 网络类型 | 每张GPU图片个数 | 学习率策略 |推理时间(fps)| Box AP | Mask AP | 下载 | 配置文件 |
| :------------- | :------------- | :-----------: | :------: | :--------: |:-----: | :-----: | :----: | :----: |
| ResNet50-FPN | Faster | 1 | 2x | - | 41.9 | - | [下载链接](https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_fpn_gn_2x_coco.pdparams) | [配置文件](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/gn/faster_rcnn_r50_fpn_gn_2x_coco.yml) |
| ResNet50-FPN | Mask | 1 | 2x | - | 42.3 | 38.4 | [下载链接](https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_fpn_gn_2x_coco.pdparams) | [配置文件](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/gn/mask_rcnn_r50_fpn_gn_2x_coco.yml) |
| ResNet50-FPN | Cascade Faster | 1 | 2x | - | 44.6 | - | [下载链接](https://paddledet.bj.bcebos.com/models/cascade_rcnn_r50_fpn_gn_2x_coco.pdparams) | [配置文件](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/gn/cascade_rcnn_r50_fpn_gn_2x_coco.yml) |
| ResNet50-FPN | Cascade Mask | 1 | 2x | - | 45.0 | 39.3 | [下载链接](https://paddledet.bj.bcebos.com/models/cascade_mask_rcnn_r50_fpn_gn_2x_coco.pdparams) | [配置文件](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/gn/cascade_mask_rcnn_r50_fpn_gn_2x_coco.yml) |
**注意:** Faster R-CNN baseline仅使用 `2fc` head,而此处使用[`4conv1fc` head](https://arxiv.org/abs/1803.08494)(4层conv之间使用GN),并且FPN也使用GN,而对于Mask R-CNN是在mask head的4层conv之间也使用GN。
## Citations
```
@inproceedings{wu2018group,
title={Group Normalization},
author={Wu, Yuxin and He, Kaiming},
booktitle={Proceedings of the European Conference on Computer Vision (ECCV)},
year={2018}
}
```
| PaddleDetection/configs/gn/README.md/0 | {
"file_path": "PaddleDetection/configs/gn/README.md",
"repo_id": "PaddleDetection",
"token_count": 1093
} | 14 |
_BASE_: [
'mask_rcnn_r50_fpn_1x_coco.yml',
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet101_pretrained.pdparams
weights: output/mask_rcnn_r101_fpn_1x_coco/model_final
ResNet:
# index 0 stands for res2
depth: 101
norm_type: bn
freeze_at: 0
return_idx: [0,1,2,3]
num_stages: 4
| PaddleDetection/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 150
} | 15 |
简体中文 | [English](README.md)
# BOT_SORT (BoT-SORT: Robust Associations Multi-Pedestrian Tracking)
## 内容
- [简介](#简介)
- [模型库](#模型库)
- [快速开始](#快速开始)
- [引用](#引用)
## 简介
[BOT_SORT](https://arxiv.org/pdf/2206.14651v2.pdf)(BoT-SORT: Robust Associations Multi-Pedestrian Tracking)。此处提供了常用检测器的配置作为参考。由于训练数据集、输入尺度、训练epoch数、NMS阈值设置等的不同均会导致模型精度和性能的差异,请自行根据需求进行适配。
## 模型库
### BOT_SORT在MOT-17 half Val Set上结果
| 检测训练数据集 | 检测器 | 输入尺度 | 检测mAP | MOTA | IDF1 | 配置文件 |
| :-------- | :----- | :----: | :------: | :----: |:-----: |:----: |
| MOT-17 half train | PP-YOLOE-l | 640x640 | 52.7 | 55.5 | 64.2 |[配置文件](./botsort_ppyoloe.yml) |
**注意:**
- 模型权重下载链接在配置文件中的```det_weights```,运行验证的命令即可自动下载。
- **MOT17-half train**是MOT17的train序列(共7个)每个视频的前一半帧的图片和标注组成的数据集,而为了验证精度可以都用**MOT17-half val**数据集去评估,它是每个视频的后一半帧组成的,数据集可以从[此链接](https://bj.bcebos.com/v1/paddledet/data/mot/MOT17.zip)下载,并解压放在`dataset/mot/`文件夹下。
- BOT_SORT的训练是单独的检测器训练MOT数据集,推理是组装跟踪器去评估MOT指标,单独的检测模型也可以评估检测指标。
- BOT_SORT的导出部署,是单独导出检测模型,再组装跟踪器运行的,参照[PP-Tracking](../../../deploy/pptracking/python)。
- BOT_SORT是PP-Human和PP-Vehicle等Pipeline分析项目跟踪方向的主要方案,具体使用参照[Pipeline](../../../deploy/pipeline)和[MOT](../../../deploy/pipeline/docs/tutorials/pphuman_mot.md)。
## 快速开始
### 1. 训练
通过如下命令一键式启动训练和评估
```bash
#单卡训练
CUDA_VISIBLE_DEVICES=0 python tools/train.py -c configs/mot/bytetrack/detector/ppyoloe_crn_l_36e_640x640_mot17half.yml --eval --amp
#多卡训练
python -m paddle.distributed.launch --log_dir=ppyoloe --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/mot/bytetrack/detector/ppyoloe_crn_l_36e_640x640_mot17half.yml --eval --amp
```
### 2. 评估
#### 2.1 评估检测效果
```bash
CUDA_VISIBLE_DEVICES=0 python tools/eval.py -c configs/mot/bytetrack/detector/ppyoloe_crn_l_36e_640x640_mot17half.yml
```
**注意:**
- 评估检测使用的是```tools/eval.py```, 评估跟踪使用的是```tools/eval_mot.py```。
#### 2.2 评估跟踪效果
```bash
CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/botsort/botsort_ppyoloe.yml --scaled=True
```
**注意:**
- `--scaled`表示在模型输出结果的坐标是否已经是缩放回原图的,如果使用的检测模型是JDE YOLOv3则为False,如果使用通用检测模型则为True, 默认值是False。
- 跟踪结果会存于`{output_dir}/mot_results/`中,里面每个视频序列对应一个txt,每个txt文件每行信息是`frame,id,x1,y1,w,h,score,-1,-1,-1`, 此外`{output_dir}`可通过`--output_dir`设置。
### 3. 导出预测模型
```bash
python tools/export_model.py -c configs/mot/bytetrack/detector/ppyoloe_crn_l_36e_640x640_mot17half.yml --output_dir=output_inference -o weights=https://bj.bcebos.com/v1/paddledet/models/mot/ppyoloe_crn_l_36e_640x640_mot17half.pdparams
```
### 4. 用导出的模型基于Python去预测
```bash
# 下载demo视频
wget https://bj.bcebos.com/v1/paddledet/data/mot/demo/mot17_demo.mp4
CUDA_VISIBLE_DEVICES=0 python deploy/pptracking/python/mot_sde_infer.py --model_dir=output_inference/ppyoloe_crn_l_36e_640x640_mot17half --tracker_config=deploy/pptracking/python/tracker_config.yml --video_file=mot17_demo.mp4 --device=GPU --threshold=0.5
```
**注意:**
- 运行前需要手动修改`tracker_config.yml`的跟踪器类型为`type: BOTSORTTracker`。
- 跟踪模型是对视频进行预测,不支持单张图的预测,默认保存跟踪结果可视化后的视频,可添加`--save_mot_txts`(对每个视频保存一个txt)或`--save_mot_txt_per_img`(对每张图片保存一个txt)表示保存跟踪结果的txt文件,或`--save_images`表示保存跟踪结果可视化图片。
- 跟踪结果txt文件每行信息是`frame,id,x1,y1,w,h,score,-1,-1,-1`。
## 引用
```
@article{aharon2022bot,
title={BoT-SORT: Robust Associations Multi-Pedestrian Tracking},
author={Aharon, Nir and Orfaig, Roy and Bobrovsky, Ben-Zion},
journal={arXiv preprint arXiv:2206.14651},
year={2022}
}
```
| PaddleDetection/configs/mot/botsort/README_cn.md/0 | {
"file_path": "PaddleDetection/configs/mot/botsort/README_cn.md",
"repo_id": "PaddleDetection",
"token_count": 2753
} | 16 |
简体中文 | [English](README.md)
# DeepSORT的ReID模型
## 简介
[DeepSORT](https://arxiv.org/abs/1812.00442)(Deep Cosine Metric Learning SORT) 由检测器和ReID模型串联组合而成,此处提供了几个常用ReID模型的配置作为DeepSORT使用的参考。
## 模型库
### 在Market1501行人重识别数据集上的结果
| 骨架网络 | 网络类型 | Params | FPS | mAP | Top1 | Top5 | 下载链接 | 配置文件 |
| :-------------: | :-----------------: | :-------: | :------: | :-------: | :-------: | :-------: | :-------: | :-------: |
| ResNet-101 | PCB Pyramid Embedding | 289M | --- | 86.31 | 94.95 | 98.28 | [下载链接](https://paddledet.bj.bcebos.com/models/mot/deepsort/deepsort_pcb_pyramid_r101.pdparams) | [配置文件](./deepsort_pcb_pyramid_r101.yml) |
| PPLCNet-2.5x | PPLCNet Embedding | 36M | --- | 71.59 | 87.38 | 95.49 | [下载链接](https://paddledet.bj.bcebos.com/models/mot/deepsort/deepsort_pplcnet.pdparams) | [配置文件](./deepsort_pplcnet.yml) |
### 在VERI-Wild车辆重识别数据集上的结果
| 骨架网络 | 网络类型 | Params | FPS | mAP | Top1 | Top5 | 下载链接 | 配置文件 |
| :-------------: | :-----------------: | :-------: | :------: | :-------: | :-------: | :-------: | :-------: | :-------: |
| PPLCNet-2.5x | PPLCNet Embedding | 93M | --- | 82.44 | 93.54 | 98.53 | [下载链接](https://paddledet.bj.bcebos.com/models/mot/deepsort/deepsort_pplcnet_vehicle.pdparams) | [配置文件](./deepsort_pplcnet_vehicle.yml) |
**注意:**
- ReID模型由[PaddleClas](https://github.com/PaddlePaddle/PaddleClas)提供,具体训练流程和代码待PaddleClas公布.
- 行人跟踪请用**Market1501**行人重识别数据集训练的ReID模型结合行人检测器去使用。
- 车辆跟踪请用**VERI-Wild**车辆重识别数据集训练的ReID模型结合车辆检测器去使用。
| PaddleDetection/configs/mot/deepsort/reid/README_cn.md/0 | {
"file_path": "PaddleDetection/configs/mot/deepsort/reid/README_cn.md",
"repo_id": "PaddleDetection",
"token_count": 1262
} | 17 |
_BASE_: [
'fairmot_dla34_30e_1088x608.yml',
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams
weights: output/fairmot_dla34_30e_1088x608_airplane/model_final
JDETracker:
conf_thres: 0.4
tracked_thresh: 0.4
metric_type: cosine
min_box_area: 0
vertical_ratio: 0
# for MOT training
TrainDataset:
!MOTDataSet
dataset_dir: dataset/mot
image_lists: ['airplane.train']
data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']
# for MOT evaluation
# If you want to change the MOT evaluation dataset, please modify 'data_root'
EvalMOTDataset:
!MOTImageFolder
dataset_dir: dataset/mot
data_root: airplane/images/train
    keep_ori_im: False # set True if saving visualization images or video, or when used in DeepSORT
# for MOT video inference
TestMOTDataset:
!MOTImageFolder
dataset_dir: dataset/mot
    keep_ori_im: True # set True if saving visualization images or video
| PaddleDetection/configs/mot/fairmot/fairmot_dla34_30e_1088x608_airplane.yml/0 | {
"file_path": "PaddleDetection/configs/mot/fairmot/fairmot_dla34_30e_1088x608_airplane.yml",
"repo_id": "PaddleDetection",
"token_count": 366
} | 18 |
English | [简体中文](README_cn.md)
# MTMCT (Multi-Target Multi-Camera Tracking)
## 内容
- [简介](#简介)
- [模型库](#模型库)
- [快速开始](#快速开始)
- [引用](#引用)
## 简介
MTMCT (Multi-Target Multi-Camera Tracking) 跨镜头多目标跟踪是某一场景下的不同摄像头拍摄的视频进行多目标跟踪,是跟踪领域一个非常重要的研究课题,在安防监控、自动驾驶、智慧城市等行业起着重要作用。MTMCT预测的是同一场景下的不同摄像头拍摄的视频,其方法的效果受场景先验知识和相机数量角度拓扑结构等信息的影响较大,PaddleDetection此处提供的是去除场景和相机相关优化方法后的一个基础版本的MTMCT算法实现,如果要继续提高效果,需要专门针对该场景和相机信息设计后处理算法。此处选用DeepSORT方案做MTMCT,为了达到实时性选用了PaddleDetection自研的[PP-YOLOv2](../../ppyolo/)和轻量级网络[PP-PicoDet](../../picodet/)作为检测器,选用PaddleClas自研的轻量级网络PP-LCNet作为ReID模型。
MTMCT是[PP-Tracking](../../../deploy/pptracking)项目中一个非常重要的方向,[PP-Tracking](../../../deploy/pptracking/README.md)是基于PaddlePaddle深度学习框架的业界首个开源实时跟踪系统。针对实际业务的难点痛点,PP-Tracking内置行人车辆跟踪、跨镜头跟踪、多类别跟踪、小目标跟踪及流量计数等能力与产业应用,同时提供可视化开发界面。模型集成目标检测、轻量级ReID、多目标跟踪等算法,进一步提升PP-Tracking在服务器端部署性能。同时支持Python、C++部署,适配Linux、NVIDIA Jetson等多个平台环境。具体可前往该目录使用。
### AI Studio公开项目案例
PP-Tracking 提供了AI Studio公开项目案例,教程请参考[PP-Tracking之手把手玩转多目标跟踪](https://aistudio.baidu.com/aistudio/projectdetail/3022582)。
## 模型库
### DeepSORT在 AIC21 MTMCT(CityFlow) 车辆跨境跟踪数据集Test集上的结果
| 检测器 | 输入尺度 | ReID | 场景 | Tricks | IDF1 | IDP | IDR | Precision | Recall | FPS | 检测器下载链接 | ReID下载链接 |
| :--------- | :--------- | :------- | :----- | :------ |:----- |:------- |:----- |:--------- |:-------- |:----- |:------ | :------ |
| PP-PicoDet | 640x640 | PP-LCNet | S06 | - | 0.3617 | 0.4417 | 0.3062 | 0.6266 | 0.4343 | - |[Detector](https://paddledet.bj.bcebos.com/models/mot/deepsort/picodet_l_640_aic21mtmct_vehicle.tar) |[ReID](https://paddledet.bj.bcebos.com/models/mot/deepsort/deepsort_pplcnet_vehicle.tar) |
| PPYOLOv2 | 640x640 | PP-LCNet | S06 | - | 0.4450 | 0.4611 | 0.4300 | 0.6385 | 0.5954 | - |[Detector](https://paddledet.bj.bcebos.com/models/mot/deepsort/ppyolov2_r50vd_dcn_365e_aic21mtmct_vehicle.tar) |[ReID](https://paddledet.bj.bcebos.com/models/mot/deepsort/deepsort_pplcnet_vehicle.tar) |
**注意:**
- S06是AIC21 MTMCT数据集Test集的场景名称,S06场景下有’c041,c042,c043,c044,c045,c046‘共6个摄像头的视频。
- 由于在部署过程中只需要前向参数,此处提供的是已经导出的模型,解压后可看到包括`infer_cfg.yml`、`model.pdiparams`、`model.pdiparams.info`和`model.pdmodel`四个文件。
## 数据集准备
对于车辆跨镜头跟踪是选用的[AIC21 MTMCT](https://www.aicitychallenge.org) (CityFlow)车辆跨境跟踪数据集,此处提供PaddleDetection团队整理过后的数据集的下载链接:`wget https://paddledet.bj.bcebos.com/data/mot/aic21mtmct_vehicle.zip`,测试使用的是其中的S06文件夹目录,此外还提供AIC21 MTMCT数据集中S01场景抽出来的极小的一个demo测试数据集:`wget https://paddledet.bj.bcebos.com/data/mot/demo/mtmct-demo.tar`
数据集的处理如下所示:
```
# AIC21 MTMCT原始数据集的目录如下所示:
|——————AIC21_Track3_MTMC_Tracking
|——————cam_framenum (Number of frames below each camera)
|——————cam_loc (Positional relationship between cameras)
|——————cam_timestamp (Time difference between cameras)
|——————eval (evaluation function and ground_truth.txt)
|——————test (S06 dataset)
|——————train (S01,S03,S04 dataset)
|——————validation (S02,S05 dataset)
|——————DataLicenseAgreement_AICityChallenge_2021.pdf
|——————list_cam.txt (List of all camera paths)
|——————ReadMe.txt (Dataset description)
|——————gen_aicity_mtmct_data.py (Camera videos extraction script)
```
需要处理成如下格式:
```
aic21mtmct_vehicle/
├── S01
├── gt
│ ├── gt.txt
├── images
├── c001
│ ├── img1
│ │ ├── 0000001.jpg
│ │ ...
│ ├── roi.jpg
├── c002
...
├── c006
├── S02
...
├── S05
├── S06
├── images
├── c041
├── img1
├── 0000001.jpg
...
├── c042
...
├── c046
├── zone (only for test-set S06 when use camera tricks for testing)
├── c041.png
...
├── c046.png
```
#### 生成S01场景的验证集数据
`python gen_aicity_mtmct_data.py ./AIC21_Track3_MTMC_Tracking/train/S01`
**注意:**
- AIC21 MTMCT数据集共有6个场景共计46个摄像头的数据,其中S01、S03和S04为训练集,S02和S05为验证集,S06是测试集,S06场景下有’c041,c042,c043,c044,c045,c046‘共6个摄像头的视频。
## 快速开始
### 1. 导出模型
Step 1:下载导出的检测模型
```bash
wget https://paddledet.bj.bcebos.com/models/mot/deepsort/picodet_l_640_aic21mtmct_vehicle.tar
tar -xvf picodet_l_640_aic21mtmct_vehicle.tar
```
Step 2:下载导出的ReID模型
```bash
wget https://paddledet.bj.bcebos.com/models/mot/deepsort/deepsort_pplcnet_vehicle.tar
tar -xvf deepsort_pplcnet_vehicle.tar
```
**注意:**
- PP-PicoDet是轻量级检测模型,其训练请参考[configs/picodet](../../picodet/README.md),并注意修改种类数和数据集路径。
- PP-LCNet是轻量级ReID模型,其训练请参考[PaddleClas](https://github.com/PaddlePaddle/PaddleClas),是在VERI-Wild车辆重识别数据集训练得到的权重,建议直接使用无需重训。
### 2. 用导出的模型基于Python去预测
```bash
# 下载demo测试视频
wget https://paddledet.bj.bcebos.com/data/mot/demo/mtmct-demo.tar
tar -xvf mtmct-demo.tar
# 用导出的PicoDet车辆检测模型和PPLCNet车辆ReID模型去基于Python预测
python deploy/pptracking/python/mot_sde_infer.py --model_dir=picodet_l_640_aic21mtmct_vehicle/ --reid_model_dir=deepsort_pplcnet_vehicle/ --mtmct_dir=mtmct-demo --mtmct_cfg=mtmct_cfg --device=GPU --scaled=True --save_mot_txts --save_images
```
**注意:**
- 跟踪模型是对视频进行预测,不支持单张图的预测,默认保存跟踪结果可视化后的视频,可添加`--save_mot_txts`(对每个视频保存一个txt),或`--save_images`表示保存跟踪结果可视化图片。
- `--scaled`表示在模型输出结果的坐标是否已经是缩放回原图的,如果使用的检测模型是JDE的YOLOv3则为False,如果使用通用检测模型则为True。
- `--mtmct_dir`是MTMCT预测的某个场景的文件夹名字,里面包含该场景不同摄像头拍摄视频的图片文件夹视频,其数量至少为两个。
- `--mtmct_cfg`是MTMCT预测的某个场景的配置文件,里面包含该一些trick操作的开关和该场景摄像头相关设置的文件路径,用户可以自行更改相关路径以及设置某些操作是否启用。
- MTMCT跨镜头跟踪输出结果为视频和txt形式。每个图片文件夹各生成一个可视化的跨镜头跟踪结果,与单镜头跟踪的结果是不同的,单镜头跟踪的结果在几个视频文件夹间是独立无关的。MTMCT的结果txt只有一个,比单镜头跟踪结果txt多了第一列镜头id号,跨镜头跟踪结果txt文件每行信息是`camera_id,frame,id,x1,y1,w,h,-1,-1`。
- MTMCT是[PP-Tracking](../../../deploy/pptracking)项目中的一个非常重要的方向,具体可前往该目录使用。
## 引用
```
@InProceedings{Tang19CityFlow,
author = {Zheng Tang and Milind Naphade and Ming-Yu Liu and Xiaodong Yang and Stan Birchfield and Shuo Wang and Ratnesh Kumar and David Anastasiu and Jenq-Neng Hwang},
title = {CityFlow: A City-Scale Benchmark for Multi-Target Multi-Camera Vehicle Tracking and Re-Identification},
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2019},
pages = {8797–8806}
}
```
| PaddleDetection/configs/mot/mtmct/README.md/0 | {
"file_path": "PaddleDetection/configs/mot/mtmct/README.md",
"repo_id": "PaddleDetection",
"token_count": 5227
} | 19 |
_BASE_: [
'../../../../runtime.yml',
'../../_base_/picodet_esnet.yml',
'../../_base_/optimizer_100e.yml',
'../../_base_/picodet_640_reader.yml',
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/LCNet_x1_0_pretrained.pdparams
weights: output/picodet_lcnet_x1_0_layout/model_final
find_unused_parameters: True
use_ema: true
cycle_epoch: 10
snapshot_epoch: 1
epoch: 100
PicoDet:
backbone: LCNet
neck: CSPPAN
head: PicoHead
nms_cpu: True
LCNet:
scale: 1.0
feature_maps: [3, 4, 5]
metric: COCO
num_classes: 5
TrainDataset:
name: COCODataSet
image_dir: train
anno_path: train.json
dataset_dir: ./dataset/publaynet/
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
EvalDataset:
name: COCODataSet
image_dir: val
anno_path: val.json
dataset_dir: ./dataset/publaynet/
TestDataset:
!ImageFolder
anno_path: ./dataset/publaynet/val.json
worker_num: 8
eval_height: &eval_height 800
eval_width: &eval_width 608
eval_size: &eval_size [*eval_height, *eval_width]
TrainReader:
sample_transforms:
- Decode: {}
- RandomCrop: {}
- RandomFlip: {prob: 0.5}
- RandomDistort: {}
batch_transforms:
- BatchRandomResize: {target_size: [[768, 576], [800, 608], [832, 640]], random_size: True, random_interp: True, keep_ratio: False}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_size: 24
shuffle: true
drop_last: true
collate_batch: false
EvalReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 2, target_size: [800, 608], keep_ratio: False}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
batch_size: 8
shuffle: false
TestReader:
inputs_def:
image_shape: [1, 3, 800, 608]
sample_transforms:
- Decode: {}
- Resize: {interp: 2, target_size: [800, 608], keep_ratio: False}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
batch_size: 1
shuffle: false
| PaddleDetection/configs/picodet/legacy_model/application/layout_analysis/picodet_lcnet_x1_0_layout.yml/0 | {
"file_path": "PaddleDetection/configs/picodet/legacy_model/application/layout_analysis/picodet_lcnet_x1_0_layout.yml",
"repo_id": "PaddleDetection",
"token_count": 952
} | 20 |
_BASE_: [
'../../datasets/coco_detection.yml',
'../../runtime.yml',
'_base_/picodet_esnet.yml',
'_base_/optimizer_300e.yml',
'_base_/picodet_320_reader.yml',
]
weights: output/picodet_m_320_coco/model_final
find_unused_parameters: True
use_ema: true
cycle_epoch: 40
snapshot_epoch: 10
| PaddleDetection/configs/picodet/legacy_model/picodet_m_320_coco.yml/0 | {
"file_path": "PaddleDetection/configs/picodet/legacy_model/picodet_m_320_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 139
} | 21 |
_BASE_: [
'../../datasets/coco_detection.yml',
'../../runtime.yml',
'../../yolov3/_base_/optimizer_270e.yml',
'../../yolov3/_base_/yolov3_darknet53.yml',
'../../yolov3/_base_/yolov3_reader.yml',
]
snapshot_epoch: 5
weights: https://paddledet.bj.bcebos.com/models/pedestrian_yolov3_darknet.pdparams
num_classes: 1
TrainDataset:
!COCODataSet
dataset_dir: dataset/pedestrian
anno_path: annotations/instances_train2017.json
image_dir: train2017
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
EvalDataset:
!COCODataSet
dataset_dir: dataset/pedestrian
anno_path: annotations/instances_val2017.json
image_dir: val2017
TestDataset:
!ImageFolder
anno_path: configs/pphuman/pedestrian_yolov3/pedestrian.json
| PaddleDetection/configs/pphuman/pedestrian_yolov3/pedestrian_yolov3_darknet.yml/0 | {
"file_path": "PaddleDetection/configs/pphuman/pedestrian_yolov3/pedestrian_yolov3_darknet.yml",
"repo_id": "PaddleDetection",
"token_count": 339
} | 22 |
English | [简体中文](README_cn.md)
# PaddleDetection applied for specific scenarios
We provide some models implemented by PaddlePaddle to detect objects in specific scenarios, users can download the models and use them in these scenarios.
| Task | Algorithm | Box AP | Download | Configs |
|:---------------------|:---------:|:------:| :-------------------------------------------------------------------------------------: |:------:|
| Vehicle Detection | YOLOv3 | 54.5 | [model](https://paddledet.bj.bcebos.com/models/vehicle_yolov3_darknet.pdparams) | [config](./vehicle_yolov3_darknet.yml) |
## Vehicle Detection
One of the major applications of vehicle detection is traffic monitoring. In this scenario, the vehicles to be detected are mostly captured by cameras mounted on top of traffic light columns.
### 1. Network
The network used for detecting vehicles is YOLOv3, whose backbone is Darknet53.
### 2. Configuration for training
PaddleDetection provides users with a configuration file [yolov3_darknet53_270e_coco.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/yolov3/yolov3_darknet53_270e_coco.yml) to train YOLOv3 on the COCO dataset. Compared with that file, we modify the following parameters for vehicle detection (see the sketch after this list):
* num_classes: 6
* anchors: [[8, 9], [10, 23], [19, 15], [23, 33], [40, 25], [54, 50], [101, 80], [139, 145], [253, 224]]
* nms/nms_top_k: 400
* nms/score_threshold: 0.005
* dataset_dir: dataset/vehicle
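A minimal sketch of how these overrides might appear in the config is shown below (illustrative only; the key names follow the other detection configs in this repository, and the authoritative settings live in [vehicle_yolov3_darknet.yml](./vehicle_yolov3_darknet.yml)):
```
num_classes: 6

YOLOv3Head:
  anchors: [[8, 9], [10, 23], [19, 15], [23, 33], [40, 25],
            [54, 50], [101, 80], [139, 145], [253, 224]]

BBoxPostProcess:
  nms:
    name: MultiClassNMS
    nms_top_k: 400
    score_threshold: 0.005

TrainDataset:
  !COCODataSet
    dataset_dir: dataset/vehicle
```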
### 3. Accuracy
The accuracy of the model trained and evaluated on our private data is shown as follows:
AP at IoU=.50:.05:.95 is 0.545.
AP at IoU=.50 is 0.764.
### 4. Inference
Users can employ the model to run inference:
```
export CUDA_VISIBLE_DEVICES=0
python -u tools/infer.py -c configs/ppvehicle/vehicle_yolov3/vehicle_yolov3_darknet.yml \
-o weights=https://paddledet.bj.bcebos.com/models/vehicle_yolov3_darknet.pdparams \
--infer_dir configs/ppvehicle/vehicle_yolov3/demo \
--draw_threshold 0.2 \
--output_dir configs/ppvehicle/vehicle_yolov3/demo/output
```
Some inference results are visualized below:
![](../../../docs/images/VehicleDetection_001.jpeg)
![](../../../docs/images/VehicleDetection_005.png)
| PaddleDetection/configs/ppvehicle/vehicle_yolov3/README.md/0 | {
"file_path": "PaddleDetection/configs/ppvehicle/vehicle_yolov3/README.md",
"repo_id": "PaddleDetection",
"token_count": 938
} | 23 |
architecture: YOLOv3
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet18_vd_pretrained.pdparams
norm_type: sync_bn
use_ema: true
ema_decay: 0.9998
YOLOv3:
backbone: ResNet
neck: PPYOLOFPN
yolo_head: YOLOv3Head
post_process: BBoxPostProcess
ResNet:
depth: 18
variant: d
return_idx: [2, 3]
freeze_at: -1
freeze_norm: false
norm_decay: 0.
PPYOLOFPN:
drop_block: true
block_size: 3
keep_prob: 0.9
conv_block_num: 0
YOLOv3Head:
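  # two detection scales (downsample 32 and 16, see YOLOv3Loss below); anchor_masks selects which anchors feed each scale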
anchor_masks: [[3, 4, 5], [0, 1, 2]]
anchors: [[10, 14], [23, 27], [37, 58],
[81, 82], [135, 169], [344, 319]]
loss: YOLOv3Loss
YOLOv3Loss:
ignore_thresh: 0.7
downsample: [32, 16]
label_smooth: false
scale_x_y: 1.05
iou_loss: IouLoss
IouLoss:
loss_weight: 2.5
loss_square: true
BBoxPostProcess:
decode:
name: YOLOBox
conf_thresh: 0.01
downsample_ratio: 32
clip_bbox: true
scale_x_y: 1.05
nms:
name: MatrixNMS
keep_top_k: 100
score_threshold: 0.01
post_threshold: 0.01
nms_top_k: -1
background_label: -1
| PaddleDetection/configs/ppyolo/_base_/ppyolo_r18vd.yml/0 | {
"file_path": "PaddleDetection/configs/ppyolo/_base_/ppyolo_r18vd.yml",
"repo_id": "PaddleDetection",
"token_count": 520
} | 24 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'./_base_/ppyolov2_r50vd_dcn.yml',
'./_base_/optimizer_365e.yml',
'./_base_/ppyolov2_reader.yml',
]
snapshot_epoch: 8
weights: output/ppyolov2_r101vd_dcn_365e_coco/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet101_vd_ssld_pretrained.pdparams
ResNet:
depth: 101
variant: d
return_idx: [1, 2, 3]
dcn_v2_stages: [3]
freeze_at: -1
freeze_norm: false
norm_decay: 0.
| PaddleDetection/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml/0 | {
"file_path": "PaddleDetection/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 235
} | 25 |
worker_num: 4
TrainReader:
sample_transforms:
- Decode: {}
- Poly2Mask: {del_poly: True}
- Resize: {interp: 1, target_size: [800, 1333], keep_ratio: True}
- RandomFlip: {prob: 0.5}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
- Gt2SparseTarget: {}
batch_size: 4
shuffle: true
drop_last: true
collate_batch: false
use_shared_memory: true
EvalReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 1, target_size: [800, 1333], keep_ratio: True}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
- Gt2SparseTarget: {}
batch_size: 1
shuffle: false
drop_last: false
TestReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 1, target_size: [800, 1333], keep_ratio: True}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
- Gt2SparseTarget: {}
batch_size: 1
shuffle: false
| PaddleDetection/configs/queryinst/_base_/queryinst_reader.yml/0 | {
"file_path": "PaddleDetection/configs/queryinst/_base_/queryinst_reader.yml",
"repo_id": "PaddleDetection",
"token_count": 508
} | 26 |
architecture: RetinaNet
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams
RetinaNet:
backbone: ResNet
neck: FPN
head: RetinaHead
ResNet:
depth: 50
variant: b
norm_type: bn
freeze_at: 0
return_idx: [1,2,3]
num_stages: 4
FPN:
out_channel: 256
spatial_scales: [0.125, 0.0625, 0.03125]
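  # P6/P7 are produced by extra stride-2 convs stacked on P5 (use_c5: false), giving the 5 levels expected by the anchor strides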
extra_stage: 2
has_extra_convs: true
use_c5: false
RetinaHead:
conv_feat:
name: RetinaFeat
feat_in: 256
feat_out: 256
num_convs: 4
norm_type: null
use_dcn: false
anchor_generator:
name: RetinaAnchorGenerator
octave_base_scale: 4
scales_per_octave: 3
aspect_ratios: [0.5, 1.0, 2.0]
strides: [8.0, 16.0, 32.0, 64.0, 128.0]
bbox_assigner:
name: MaxIoUAssigner
positive_overlap: 0.5
negative_overlap: 0.4
allow_low_quality: true
loss_class:
name: FocalLoss
gamma: 2.0
alpha: 0.25
loss_weight: 1.0
loss_bbox:
name: SmoothL1Loss
beta: 0.0
loss_weight: 1.0
nms:
name: MultiClassNMS
nms_top_k: 1000
keep_top_k: 100
score_threshold: 0.05
nms_threshold: 0.5
| PaddleDetection/configs/retinanet/_base_/retinanet_r50_fpn.yml/0 | {
"file_path": "PaddleDetection/configs/retinanet/_base_/retinanet_r50_fpn.yml",
"repo_id": "PaddleDetection",
"token_count": 546
} | 27 |
epoch: 36
LearningRate:
base_lr: 0.008
schedulers:
- !CosineDecay
max_epochs: 44
- !LinearWarmup
start_factor: 0.
steps: 1000
OptimizerBuilder:
clip_grad_by_norm: 35.
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0005
type: L2
| PaddleDetection/configs/rotate/ppyoloe_r/_base_/optimizer_3x.yml/0 | {
"file_path": "PaddleDetection/configs/rotate/ppyoloe_r/_base_/optimizer_3x.yml",
"repo_id": "PaddleDetection",
"token_count": 144
} | 28 |
worker_num: 4
TrainReader:
sample_transforms:
- Decode: {}
- Poly2Array: {}
- RandomRFlip: {}
- RResize: {target_size: [1024, 1024], keep_ratio: True, interp: 2}
- Poly2RBox: {rbox_type: 'le135'}
batch_transforms:
- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
- Permute: {}
- PadRGT: {}
- PadBatch: {pad_to_stride: 32}
batch_size: 2
shuffle: true
drop_last: true
EvalReader:
sample_transforms:
- Decode: {}
- Poly2Array: {}
- RResize: {target_size: [1024, 1024], keep_ratio: True, interp: 2}
- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
batch_size: 2
shuffle: false
drop_last: false
collate_batch: false
TestReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 2, target_size: [1024, 1024], keep_ratio: True}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
batch_size: 1
shuffle: false
drop_last: false
| PaddleDetection/configs/rotate/s2anet/_base_/s2anet_reader.yml/0 | {
"file_path": "PaddleDetection/configs/rotate/s2anet/_base_/s2anet_reader.yml",
"repo_id": "PaddleDetection",
"token_count": 534
} | 29 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'_base_/optimizer_6x.yml',
'_base_/rtdetr_r50vd.yml',
'_base_/rtdetr_reader.yml',
]
weights: output/rtdetr_hgnetv2_x_6x_coco/model_final
pretrain_weights: https://bj.bcebos.com/v1/paddledet/models/pretrained/PPHGNetV2_X_ssld_pretrained.pdparams
find_unused_parameters: True
log_iter: 200
DETR:
backbone: PPHGNetV2
PPHGNetV2:
arch: 'X'
return_idx: [1, 2, 3]
freeze_stem_only: True
freeze_at: 0
freeze_norm: True
lr_mult_list: [0., 0.01, 0.01, 0.01, 0.01]
HybridEncoder:
hidden_dim: 384
use_encoder_idx: [2]
num_encoder_layers: 1
encoder_layer:
name: TransformerLayer
d_model: 384
nhead: 8
dim_feedforward: 2048
dropout: 0.
activation: 'gelu'
expansion: 1.0
| PaddleDetection/configs/rtdetr/rtdetr_hgnetv2_x_6x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/rtdetr/rtdetr_hgnetv2_x_6x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 379
} | 30 |
_BASE_: [
'../../ppyoloe/ppyoloe_plus_crn_s_80e_coco.yml',
]
log_iter: 50
snapshot_epoch: 5
weights: output/ppyoloe_plus_crn_s_80e_coco_sup005/model_final
pretrain_weights: https://bj.bcebos.com/v1/paddledet/models/pretrained/ppyoloe_crn_s_obj365_pretrained.pdparams
depth_mult: 0.33
width_mult: 0.50
TrainDataset:
!COCODataSet
image_dir: train2017
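    # 5% labeled split of train2017 (fold 1@5) used for the supervised-only baseline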
anno_path: semi_annotations/instances_train2017.1@5.json
dataset_dir: dataset/coco
data_fields: ['image', 'gt_bbox', 'gt_class']
epoch: 80
LearningRate:
base_lr: 0.001
schedulers:
- !CosineDecay
max_epochs: 96
- !LinearWarmup
start_factor: 0.
epochs: 5
| PaddleDetection/configs/semi_det/baseline/ppyoloe_plus_crn_s_80e_coco_sup005.yml/0 | {
"file_path": "PaddleDetection/configs/semi_det/baseline/ppyoloe_plus_crn_s_80e_coco_sup005.yml",
"repo_id": "PaddleDetection",
"token_count": 308
} | 31 |
_BASE_: [
'../../yolov3/yolov3_r34_270e_coco.yml',
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/yolov3_r34_270e_coco.pdparams
slim: DistillPrune
distill_loss: DistillYOLOv3Loss
DistillYOLOv3Loss:
weight: 1000
pruner: Pruner
Pruner:
criterion: l1_norm
pruned_params: ['conv2d_27.w_0', 'conv2d_28.w_0', 'conv2d_29.w_0',
'conv2d_30.w_0', 'conv2d_31.w_0', 'conv2d_32.w_0',
'conv2d_34.w_0', 'conv2d_35.w_0', 'conv2d_36.w_0',
'conv2d_37.w_0', 'conv2d_38.w_0', 'conv2d_39.w_0',
'conv2d_41.w_0', 'conv2d_42.w_0', 'conv2d_43.w_0',
'conv2d_44.w_0', 'conv2d_45.w_0', 'conv2d_46.w_0']
pruned_ratios: [0.5,0.5,0.5,0.5,0.5,0.5,0.7,0.7,0.7,0.7,0.7,0.7,0.8,0.8,0.8,0.8,0.8,0.8]
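# pruned_ratios[i] is applied to the parameter at the same position in pruned_params above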
| PaddleDetection/configs/slim/extensions/yolov3_mobilenet_v1_coco_distill_prune.yml/0 | {
"file_path": "PaddleDetection/configs/slim/extensions/yolov3_mobilenet_v1_coco_distill_prune.yml",
"repo_id": "PaddleDetection",
"token_count": 515
} | 32 |
pretrain_weights: https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams
slim: Pruner
Pruner:
criterion: fpgm
pruned_params: ['conv2d_52.w_0', 'conv2d_53.w_0', 'conv2d_54.w_0',
'conv2d_55.w_0', 'conv2d_56.w_0', 'conv2d_57.w_0',
'conv2d_59.w_0', 'conv2d_60.w_0', 'conv2d_61.w_0',
'conv2d_62.w_0', 'conv2d_63.w_0', 'conv2d_64.w_0',
'conv2d_66.w_0', 'conv2d_67.w_0', 'conv2d_68.w_0',
'conv2d_69.w_0', 'conv2d_70.w_0', 'conv2d_71.w_0']
pruned_ratios: [0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.875,0.875,0.875,0.875,0.875,0.875]
print_params: True
| PaddleDetection/configs/slim/prune/yolov3_darknet_prune_fpgm.yml/0 | {
"file_path": "PaddleDetection/configs/slim/prune/yolov3_darknet_prune_fpgm.yml",
"repo_id": "PaddleDetection",
"token_count": 442
} | 33 |
# PP-YOLOE-SOD 小目标检测模型(PP-YOLOE Small Object Detection)
<img src="https://user-images.githubusercontent.com/82303451/182520025-f6bd1c76-a9f9-4f8c-af9b-b37a403258d8.png" title="VisDrone" alt="VisDrone" width="300"><img src="https://user-images.githubusercontent.com/82303451/182521833-4aa0314c-b3f2-4711-9a65-cabece612737.png" title="VisDrone" alt="VisDrone" width="300"><img src="https://user-images.githubusercontent.com/82303451/182520038-cacd5d09-0b85-475c-8e59-72f1fc48eef8.png" title="DOTA" alt="DOTA" height="168"><img src="https://user-images.githubusercontent.com/82303451/182524123-dcba55a2-ce2d-4ba1-9d5b-eb99cb440715.jpeg" title="Xview" alt="Xview" height="168">
## 内容
- [简介](#简介)
- [切图使用说明](#切图使用说明)
- [小目标数据集下载](#小目标数据集下载)
- [统计数据集分布](#统计数据集分布)
- [SAHI切图](#SAHI切图)
- [模型库](#模型库)
- [VisDrone模型](#VisDrone模型)
- [COCO模型](#COCO模型)
- [切图模型](#切图模型)
- [拼图模型](#拼图模型)
- [注意事项](#注意事项)
- [模型库使用说明](#模型库使用说明)
- [训练](#训练)
- [评估](#评估)
- [预测](#预测)
- [部署](#部署)
- [引用](#引用)
## 简介
PaddleDetection团队提供了针对VisDrone-DET、DOTA水平框、Xview等小目标场景数据集的基于PP-YOLOE改进的检测模型 PP-YOLOE-SOD,以及提供了一套使用[SAHI](https://github.com/obss/sahi)(Slicing Aided Hyper Inference)工具的切图和拼图的方案。
- PP-YOLOE-SOD 是PaddleDetection团队自研的小目标检测特色模型,使用**数据集分布相关的基于向量的DFL算法** 和 **针对小目标优化的中心先验优化策略**,并且**在模型的Neck(FPN)结构中加入Transformer模块**,以及结合增加P2层、使用large size等策略,最终在多个小目标数据集上达到极高的精度。
- 切图拼图方案**适用于任何检测模型**,建议**使用 PP-YOLOE-SOD 结合切图拼图方案**一起使用以达到最佳的效果。
- 官方 AI Studio 教程案例请参考 [基于PP-YOLOE-SOD的无人机航拍图像检测案例全流程实操](https://aistudio.baidu.com/aistudio/projectdetail/5036782),欢迎一起动手实践学习。
- 第三方 AI Studio 教程案例可参考 [PPYOLOE:遥感场景下的小目标检测与部署(切图版)](https://aistudio.baidu.com/aistudio/projectdetail/4493701) 和 [涨分神器!基于PPYOLOE的切图和拼图解决方案](https://aistudio.baidu.com/aistudio/projectdetail/4438275),欢迎一起动手实践学习。
**注意:**
- **不通过切图拼图而直接使用原图或子图**去训练评估预测,推荐使用 PP-YOLOE-SOD 模型,更多细节和消融实验可参照[COCO模型](#COCO模型)和[VisDrone模型](./visdrone)。
- 是否需要切图然后使用子图去**训练**,建议首先参照[切图使用说明](#切图使用说明)中的[统计数据集分布](#统计数据集分布)分析一下数据集再确定,一般数据集中**所有的目标均极小**的情况下推荐切图去训练。
- 是否需要切图然后使用子图去**预测**,建议在切图训练的情况下,配合着**同样操作的切图策略和参数**去预测(inference)效果更佳。但其实即便不切图训练,也可进行切图预测(inference),只需**在常规的预测命令最后加上`--slice_infer`以及相关子图参数**即可。
- 是否需要切图然后使用子图去**评估**,建议首先确保制作生成了合适的子图验证集,以及确保对应的标注框制作无误,并需要参照[模型库使用说明-评估](#评估)去**改动配置文件中的验证集(EvalDataset)的相关配置**,然后**在常规的评估命令最后加上`--slice_infer`以及相关子图参数**即可。
- `--slice_infer`的操作在PaddleDetection中默认**子图预测框会自动组合并拼回原图**,默认返回的是原图上的预测框,此方法也**适用于任何训好的检测模型**,无论是否切图训练。
## 切图使用说明
### 小目标数据集下载
PaddleDetection团队整理提供的VisDrone-DET、DOTA水平框、Xview等小目标场景数据集的下载链接可以参照 [DataDownload.md](./DataDownload.md)。
### 统计数据集分布
对于待训的数据集(默认已处理为COCO格式,参照 [COCO格式数据集准备](../../docs/tutorials/data/PrepareDetDataSet.md#用户数据转成COCO数据)),首先统计**标注框的平均宽高占图片真实宽高的比例**分布:
以DOTA水平框数据集的train数据集为例:
```bash
python tools/box_distribution.py --json_path dataset/DOTA/annotations/train.json --out_img box_distribution.jpg --eval_size 640 --small_stride 8
```
- `--json_path` :待统计数据集 COCO 格式 annotation 的json标注文件路径
- `--out_img` :输出的统计分布图的路径
- `--eval_size` :推理尺度(默认640)
- `--small_stride` :模型最小步长(默认8)
统计结果打印如下:
```bash
Suggested reg_range[1] is 13 # DFL算法中推荐值,在 PP-YOLOE-SOD 模型的配置文件的head中设置为此值,效果最佳
Mean of all img_w is 2304.3981547196595 # 原图宽的平均值
Mean of all img_h is 2180.9354151880766 # 原图高的平均值
Median of ratio_w is 0.03799439775910364 # 标注框的宽与原图宽的比例的中位数
Median of ratio_h is 0.04074914637387802 # 标注框的高与原图高的比例的中位数
all_img with box: 1409 # 数据集图片总数(排除无框或空标注的图片)
all_ann: 98905 # 数据集标注框总数
Distribution saved as box_distribution.jpg
```
**注意:**
- 一般情况下,在原始数据集全部有标注框的图片中,**原图宽高的平均值大于1500像素,且有1/2以上的图片标注框的平均宽高与原图宽高比例小于0.04时(通过打印中位数得到该值)**,建议进行切图训练。
- `Suggested reg_range[1]` 为数据集在优化后DFL算法中推荐的`reg_range`上限,即`reg_max + 1`,在 PP-YOLOE-SOD 模型的配置文件的head中设置这个值。
### SAHI切图
针对需要切图的数据集,使用[SAHI](https://github.com/obss/sahi)库进行切图:
#### 安装SAHI库:
参考 [SAHI installation](https://github.com/obss/sahi/blob/main/README.md#installation) 进行安装:`pip install sahi`。
#### 基于SAHI切图
以DOTA水平框数据集的train数据集为例,切分后的**子图文件夹**与**子图json标注文件**共同保存在`dota_sliced`文件夹下,分别命名为`train_images_500_025`、`train_500_025.json`:
```bash
python tools/slice_image.py --image_dir dataset/DOTA/train/ --json_path dataset/DOTA/annotations/train.json --output_dir dataset/dota_sliced --slice_size 500 --overlap_ratio 0.25
```
- `--image_dir`:原始数据集图片文件夹的路径
- `--json_path`:原始数据集COCO格式的json标注文件的路径
- `--output_dir`:切分后的子图及其json标注文件保存的路径
- `--slice_size`:切分以后子图的边长尺度大小(默认切图后为正方形)
- `--overlap_ratio`:切分时的子图之间的重叠率
**注意:**
- 如果切图然后使用子图去**训练**,则只能**离线切图**,即切完图后将子图保存到本地存储空间中。
- 如果切图然后使用子图去**评估或预测**,则既可以**离线切图**,也可以**在线切图**,PaddleDetection中支持切图并自动拼图组合结果到原图上。
## 模型库
### [VisDrone模型](visdrone/)
| 模型 | COCOAPI mAP<sup>val<br>0.5:0.95 | COCOAPI mAP<sup>val<br>0.5 | COCOAPI mAP<sup>test_dev<br>0.5:0.95 | COCOAPI mAP<sup>test_dev<br>0.5 | MatlabAPI mAP<sup>test_dev<br>0.5:0.95 | MatlabAPI mAP<sup>test_dev<br>0.5 | 下载 | 配置文件 |
|:---------|:------:|:------:| :----: | :------:| :------: | :------:| :----: | :------:|
|PP-YOLOE-s| 23.5 | 39.9 | 19.4 | 33.6 | 23.68 | 40.66 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_crn_s_80e_visdrone.yml) |
|PP-YOLOE-P2-Alpha-s| 24.4 | 41.6 | 20.1 | 34.7 | 24.55 | 42.19 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_p2_alpha_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_crn_s_p2_alpha_80e_visdrone.yml) |
|**PP-YOLOE+_SOD-s**| **25.1** | **42.8** | **20.7** | **36.2** | **25.16** | **43.86** | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_plus_sod_crn_s_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_plus_sod_crn_s_80e_visdrone.yml) |
|PP-YOLOE-l| 29.2 | 47.3 | 23.5 | 39.1 | 28.00 | 46.20 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_crn_l_80e_visdrone.yml) |
|PP-YOLOE-P2-Alpha-l| 30.1 | 48.9 | 24.3 | 40.8 | 28.47 | 48.16 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_p2_alpha_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_crn_l_p2_alpha_80e_visdrone.yml) |
|**PP-YOLOE+_SOD-l**| **31.9** | **52.1** | **25.6** | **43.5** | **30.25** | **51.18** | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_plus_sod_crn_l_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_plus_sod_crn_l_80e_visdrone.yml) |
|PP-YOLOE-Alpha-largesize-l| 41.9 | 65.0 | 32.3 | 53.0 | 37.13 | 61.15 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_alpha_largesize_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_crn_l_alpha_largesize_80e_visdrone.yml) |
|PP-YOLOE-P2-Alpha-largesize-l| 41.3 | 64.5 | 32.4 | 53.1 | 37.49 | 51.54 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_p2_alpha_largesize_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_crn_l_p2_alpha_largesize_80e_visdrone.yml) |
|PP-YOLOE+_largesize-l | 43.3 | 66.7 | 33.5 | 54.7 | 38.24 | 62.76 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_l_largesize_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_plus_crn_l_largesize_80e_visdrone.yml) |
|**PP-YOLOE+_SOD-largesize-l** | 42.7 | 65.9 | **33.6** | **55.1** | **38.4** | **63.07** | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_plus_sod_crn_l_largesize_80e_visdrone.pdparams) | [配置文件](visdrone/ppyoloe_plus_sod_crn_l_largesize_80e_visdrone.yml) |
**注意:**
- 上表中的模型均为**使用原图训练**,也**使用原图评估预测**,AP精度均为**原图验证集**上评估的结果。
- VisDrone-DET数据集**可使用原图训练,也可使用切图后训练**,通过数据集统计分布分析,推荐使用**原图训练**,推荐直接使用带**SOD**的模型配置文件去训练评估和预测部署,在显卡算力有限时也可使用切图后训练。
- 上表中的模型指标均是使用VisDrone-DET的train子集作为训练集,使用VisDrone-DET的val子集和test_dev子集作为验证集。
- **SOD**表示使用**基于向量的DFL算法**和针对小目标的**中心先验优化策略**,并**在模型的Neck结构中加入transformer**。
- **P2**表示增加P2层(1/4下采样层)的特征,共输出4个PPYOLOEHead。
- **Alpha**表示对CSPResNet骨干网络增加可一个学习权重参数Alpha参与训练。
- **largesize**表示使用**以1600尺度为基础的多尺度训练**和**1920尺度预测**,相应的训练batch_size也减小,以速度来换取高精度。
- MatlabAPI测试是使用官网评测工具[VisDrone2018-DET-toolkit](https://github.com/VisDrone/VisDrone2018-DET-toolkit)。
<details>
<summary> 快速开始 </summary>
```shell
# 训练
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/smalldet/visdrone/ppyoloe_plus_sod_crn_l_80e_visdrone.yml --amp --eval
# 评估
python tools/eval.py -c configs/smalldet/visdrone/ppyoloe_plus_sod_crn_l_80e_visdrone.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_sod_crn_l_80e_visdrone.pdparams
# 预测
python tools/infer.py -c configs/smalldet/visdrone/ppyoloe_plus_sod_crn_l_80e_visdrone.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_sod_crn_l_80e_visdrone.pdparams --infer_img=demo/visdrone_0000315_01601_d_0000509.jpg --draw_threshold=0.25
```
</details>
### COCO模型
| 模型 | mAP<sup>val<br>0.5:0.95 | AP<sup>0.5 | AP<sup>0.75 | AP<sup>small | AP<sup>medium | AP<sup>large | AR<sup>small | AR<sup>medium | AR<sup>large | 下载链接 | 配置文件 |
|:--------:|:-----------------------:|:----------:|:-----------:|:------------:|:-------------:|:-----------:|:------------:|:-------------:|:------------:|:-------:|:-------:|
|PP-YOLOE+_l| 52.9 | 70.1 | 57.9 | 35.2 | 57.5 | 69.1 | 56.0 | 77.9 | 86.9 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_l_80e_coco.pdparams) | [配置文件](../ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml) |
|**PP-YOLOE+_SOD-l**| 53.0 | **70.4** | 57.7 | **37.1** | 57.5 | 69.0 | **56.5** | 77.5 | 86.7 | [下载链接](https://paddledet.bj.bcebos.com/models/ppyoloe_plus_sod_crn_l_80e_coco.pdparams) | [配置文件](./ppyoloe_plus_sod_crn_l_80e_coco.yml) |
**注意:**
- 上表中的模型均为**使用原图训练**,也**使用原图评估预测**,网络输入尺度为640x640,训练集为COCO的train2017,验证集为val2017,均为8卡总batch_size为64训练80 epoch。
- **SOD**表示使用**基于向量的DFL算法**和针对小目标的**中心先验优化策略**,并**在模型的Neck结构中加入transformer**,可在 AP<sup>small 上提升1.9。
<details>
<summary> 快速开始 </summary>
```shell
# 训练
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/smalldet/ppyoloe_plus_sod_crn_l_80e_coco.yml --amp --eval
# 评估
python tools/eval.py -c configs/smalldet/ppyoloe_plus_sod_crn_l_80e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_sod_crn_l_80e_coco.pdparams
# 预测
python tools/infer.py -c configs/smalldet/ppyoloe_plus_sod_crn_l_80e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_sod_crn_l_80e_coco.pdparams --infer_img=demo/000000014439_640x640.jpg --draw_threshold=0.25
```
</details>
### 切图模型
| 模型 | 数据集 | SLICE_SIZE | OVERLAP_RATIO | 类别数 | mAP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | 下载链接 | 配置文件 |
|:---------|:---------------:|:---------------:|:---------------:|:------:|:-----------------------:|:-------------------:|:---------:| :-----: |
|PP-YOLOE-P2-l| DOTA | 500 | 0.25 | 15 | 53.9 | 78.6 | [下载链接](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_p2_crn_l_80e_sliced_DOTA_500_025.pdparams) | [配置文件](./ppyoloe_p2_crn_l_80e_sliced_DOTA_500_025.yml) |
|PP-YOLOE-P2-l| Xview | 400 | 0.25 | 60 | 14.9 | 27.0 | [下载链接](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_p2_crn_l_80e_sliced_xview_400_025.pdparams) | [配置文件](./ppyoloe_p2_crn_l_80e_sliced_xview_400_025.yml) |
|PP-YOLOE-l| VisDrone-DET| 640 | 0.25 | 10 | 38.5 | 60.2 | [下载链接](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams) | [配置文件](./ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml) |
**注意:**
- 上表中的模型均为使用**切图后的子图训练**,且使用**切图后的子图评估预测**,AP精度均为**子图验证集**上评估的结果。
- **SLICE_SIZE**表示使用SAHI工具切图后子图的边长大小,**OVERLAP_RATIO**表示切图的子图之间的重叠率。
- VisDrone-DET的模型与[拼图模型](#拼图模型)表格中的VisDrone-DET是**同一个模型权重**,但此处AP精度是在**切图后的子图验证集**上评估的结果。
<details>
<summary> 快速开始 </summary>
```shell
# 训练
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml --amp --eval
# 子图直接评估
python tools/eval.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams
# 子图直接预测
python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --infer_img=demo/visdrone_0000315_01601_d_0000509.jpg --draw_threshold=0.25
```
</details>
### 拼图模型
| 模型 | 数据集 | SLICE_SIZE | OVERLAP_RATIO | 类别数 | mAP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | 下载链接 | 配置文件 |
|:---------|:---------------:|:---------------:|:---------------:|:------:|:-----------------------:|:-------------------:|:---------:| :-----: |
|PP-YOLOE-l (原图直接评估)| VisDrone-DET| 640 | 0.25 | 10 | 29.7 | 48.5 | [下载链接](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams) | [配置文件](./ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml) |
|PP-YOLOE-l (切图拼图评估)| VisDrone-DET| 640 | 0.25 | 10 | 37.3 | 59.5 | [下载链接](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams) | [配置文件](./ppyoloe_crn_l_80e_sliced_visdrone_640_025_slice_infer.yml) |
**注意:**
- 上表中的模型均为使用**切图后的子图**训练,评估预测时分为两种:**直接使用原图**评估预测,和**使用子图自动拼成原图**评估预测,AP精度均为**原图验证集**上评估的结果。
- **SLICE_SIZE**表示使用SAHI工具切图后子图的边长大小,**OVERLAP_RATIO**表示切图的子图之间的重叠率。
- VisDrone-DET的模型与[切图模型](#切图模型)表格中的VisDrone-DET是**同一个模型权重**,但此处AP精度是在**原图验证集**上评估的结果,需要提前修改`ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml`里的`EvalDataset`的默认的子图验证集路径为以下**原图验证集路径**:
```
EvalDataset:
!COCODataSet
image_dir: VisDrone2019-DET-val
anno_path: val.json
dataset_dir: dataset/visdrone
```
<details>
<summary> 快速开始 </summary>
```shell
# 训练
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml --amp --eval
# 原图直接评估,注意需要提前修改此yml中的 `EvalDataset` 的默认的子图验证集路径 为 原图验证集路径:
python tools/eval.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams
# 切图拼图评估,加上 --slice_infer,注意是使用的带 _slice_infer 后缀的yml配置文件
python tools/eval.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025_slice_infer.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --slice_infer
# 切图拼图预测,加上 --slice_infer
python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --infer_img=demo/visdrone_0000315_01601_d_0000509.jpg --draw_threshold=0.25 --slice_infer
```
</details>
### 注意事项
- 切图和拼图,需要使用[SAHI](https://github.com/obss/sahi)切图工具,需要首先安装:`pip install sahi`,参考[installation](https://github.com/obss/sahi/blob/main/README.md#installation)。
- DOTA水平框和Xview数据集均是**切图后训练**,AP指标为**切图后的子图val上的指标**。
- VisDrone-DET数据集请参照[visdrone](./visdrone),**可使用原图训练,也可使用切图后训练**,这上面表格中的指标均是使用VisDrone-DET的val子集做验证而未使用test_dev子集。
- PP-YOLOE模型训练过程中使用8 GPUs进行混合精度训练,如果**GPU卡数**或者**batch size**发生了改变,你需要按照公式 **lr<sub>new</sub> = lr<sub>default</sub> * (batch_size<sub>new</sub> * GPU_number<sub>new</sub>) / (batch_size<sub>default</sub> * GPU_number<sub>default</sub>)** 调整学习率(换算示例见本列表之后)。
- 常用训练验证部署等步骤请参考[ppyoloe](../ppyoloe#getting-start)。
- 自动切图和拼图的推理预测需添加设置`--slice_infer`,具体见下文[模型库使用说明](#模型库使用说明)中的[预测](#预测)和[部署](#部署)。
- 自动切图和拼图过程,参照[2.3 子图拼图评估](#评估)。
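以上述学习率换算公式为例(仅为换算示意,base_lr、batch_size 等默认值请以实际使用的配置文件为准):假设默认配置为 8 卡、每卡 batch_size 为 8(总 batch_size 64)、base_lr 为 0.001,改为 4 卡、每卡 batch_size 仍为 8(总 batch_size 32)时:
```
lr_new = 0.001 * (8 * 4) / (8 * 8) = 0.0005
```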
## 模型库使用说明
### 训练
#### 1.1 原图训练
首先将待训数据集制作成COCO数据集格式,然后按照PaddleDetection的模型的常规训练流程训练即可。
执行以下指令使用混合精度训练COCO数据集:
```bash
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/smalldet/ppyoloe_plus_sod_crn_l_80e_coco.yml --amp --eval
```
**注意:**
- 使用默认配置训练需要设置`--amp`以避免显存溢出,`--eval`表示边训边验证,会自动保存最佳精度的模型权重。
#### 1.2 切图训练
首先将待训数据集制作成COCO数据集格式,然后使用SAHI切图工具进行**离线切图**,对保存的子图按**常规检测模型的训练流程**走即可。
也可直接下载PaddleDetection团队提供的切图后的VisDrone-DET、DOTA水平框、Xview数据集。
执行以下指令使用混合精度训练VisDrone切图数据集:
```bash
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml --amp --eval
```
### 评估
#### 2.1 子图评估
**默认评估方式是子图评估**,子图数据集的验证集设置为:
```
EvalDataset:
!COCODataSet
image_dir: val_images_640_025
anno_path: val_640_025.json
dataset_dir: dataset/visdrone_sliced
```
按常规检测模型的评估流程,评估提前切好并存下来的子图上的精度:
```bash
CUDA_VISIBLE_DEVICES=0 python tools/eval.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams
```
#### 2.2 原图评估
修改验证集的标注文件路径为**原图标注文件**:
```
EvalDataset:
!COCODataSet
image_dir: VisDrone2019-DET-val
anno_path: val.json
dataset_dir: dataset/visdrone
```
直接评估原图上的精度:
```bash
CUDA_VISIBLE_DEVICES=0 python tools/eval.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams
```
#### 2.3 子图拼图评估
修改验证集的标注文件路径为**原图标注文件**:
```
# very slow, preferably eval with a trained weights file (xx.pdparams)
# if you want to eval during training, change SlicedCOCODataSet to COCODataSet and delete sliced_size and overlap_ratio
EvalDataset:
!SlicedCOCODataSet
image_dir: VisDrone2019-DET-val
anno_path: val.json
dataset_dir: dataset/visdrone
sliced_size: [640, 640]
overlap_ratio: [0.25, 0.25]
```
会在评估过程中自动对原图进行切图最后再重组和融合结果来评估原图上的精度:
```bash
CUDA_VISIBLE_DEVICES=0 python tools/eval.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025_slice_infer.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --slice_infer --combine_method=nms --match_threshold=0.6 --match_metric=ios
```
**注意:**
- 设置`--slice_infer`表示切图预测并拼装重组结果,如果不使用则不写,注意需要确保EvalDataset的数据集类是选用的SlicedCOCODataSet而不是COCODataSet;
- 设置`--slice_size`表示切图的子图尺寸大小,设置`--overlap_ratio`表示子图间重叠率,可以自行修改选择合适的子图尺度sliced_size和子图间重叠率overlap_ratio,如:
```
EvalDataset:
!SlicedCOCODataSet
image_dir: VisDrone2019-DET-val
anno_path: val.json
dataset_dir: dataset/visdrone
sliced_size: [480, 480]
overlap_ratio: [0.2, 0.2]
```
- 设置`--combine_method`表示子图结果重组去重的方式,默认是`nms`;
- 设置`--match_threshold`表示子图结果重组去重的阈值,默认是0.6;
- 设置`--match_metric`表示子图结果重组去重的度量标准,默认是`ios`表示交小比(两个框交集面积除以更小框的面积),也可以选择交并比`iou`(两个框交集面积除以并集面积),精度效果因数据集而异,但选择`ios`预测速度会更快一点(两种度量的公式见下);
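上述两种去重度量可写成如下公式(A、B 表示两个预测框):
```
ios(A, B) = area(A ∩ B) / min(area(A), area(B))
iou(A, B) = area(A ∩ B) / area(A ∪ B)
```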
### 预测
#### 3.1 子图或原图直接预测
与评估流程基本相同,可以在提前切好并存下来的子图上预测,也可以对原图预测,如:
```bash
CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --infer_img=demo/visdrone_0000315_01601_d_0000509.jpg --draw_threshold=0.25
```
#### 3.2 原图自动切图并拼图预测
也可以对原图进行自动切图并拼图重组来预测原图,如:
```bash
# 单张图
CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --infer_img=demo/visdrone_0000315_01601_d_0000509.jpg --draw_threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios --save_results=True
# 或图片文件夹
CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams --infer_dir=demo/ --draw_threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios
```
- 设置`--slice_infer`表示切图预测并拼装重组结果,如果不使用则不写;
- 设置`--slice_size`表示切图的子图尺寸大小,设置`--overlap_ratio`表示子图间重叠率;
- 设置`--combine_method`表示子图结果重组去重的方式,默认是`nms`;
- 设置`--match_threshold`表示子图结果重组去重的阈值,默认是0.6;
- 设置`--match_metric`表示子图结果重组去重的度量标准,默认是`ios`表示交小比(两个框交集面积除以更小框的面积),也可以选择交并比`iou`(两个框交集面积除以并集面积),精度效果因数据集而异,但选择`ios`预测速度会更快一点;
- 设置`--save_results`表示保存图片结果为json文件,一般只单张图预测时使用;
### 部署
#### 4.1 导出模型
```bash
# export model
CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_80e_sliced_visdrone_640_025.pdparams
```
#### 4.2 使用原图或子图直接推理
```bash
# deploy infer
CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_80e_sliced_visdrone_640_025 --image_file=demo/visdrone_0000315_01601_d_0000509.jpg --device=GPU --save_images --threshold=0.25
```
#### 4.3 使用原图自动切图并拼图重组结果来推理
```bash
# deploy slice infer
# 单张图
CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_80e_sliced_visdrone_640_025 --image_file=demo/visdrone_0000315_01601_d_0000509.jpg --device=GPU --save_images --threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios --save_results=True
# 或图片文件夹
CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_80e_sliced_visdrone_640_025 --image_dir=demo/ --device=GPU --save_images --threshold=0.25 --slice_infer --slice_size 640 640 --overlap_ratio 0.25 0.25 --combine_method=nms --match_threshold=0.6 --match_metric=ios
```
- 设置`--slice_infer`表示切图预测并拼装重组结果,如果不使用则不写;
- 设置`--slice_size`表示切图的子图尺寸大小,设置`--overlap_ratio`表示子图间重叠率;
- 设置`--combine_method`表示子图结果重组去重的方式,默认是`nms`;
- 设置`--match_threshold`表示子图结果重组去重的阈值,默认是0.6;
- 设置`--match_metric`表示子图结果重组去重的度量标准,默认是`ios`表示交小比(两个框交集面积除以更小框的面积),也可以选择交并比`iou`(两个框交集面积除以并集面积),精度效果因数据集而异,但选择`ios`预测速度会更快一点;
- 设置`--save_results`表示保存图片结果为json文件,一般只单张图预测时使用;
## 引用
```
@article{akyon2022sahi,
title={Slicing Aided Hyper Inference and Fine-tuning for Small Object Detection},
author={Akyon, Fatih Cagatay and Altinuc, Sinan Onur and Temizel, Alptekin},
journal={2022 IEEE International Conference on Image Processing (ICIP)},
doi={10.1109/ICIP46576.2022.9897990},
pages={966-970},
year={2022}
}
@inproceedings{xia2018dota,
title={DOTA: A large-scale dataset for object detection in aerial images},
author={Xia, Gui-Song and Bai, Xiang and Ding, Jian and Zhu, Zhen and Belongie, Serge and Luo, Jiebo and Datcu, Mihai and Pelillo, Marcello and Zhang, Liangpei},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages={3974--3983},
year={2018}
}
@ARTICLE{9573394,
author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Fan, Heng and Hu, Qinghua and Ling, Haibin},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Detection and Tracking Meet Drones Challenge},
year={2021},
volume={},
number={},
pages={1-1},
doi={10.1109/TPAMI.2021.3119563}
}
```
| PaddleDetection/configs/smalldet/README.md/0 | {
"file_path": "PaddleDetection/configs/smalldet/README.md",
"repo_id": "PaddleDetection",
"token_count": 17838
} | 34 |
_BASE_: [
'../datasets/sniper_visdrone_detection.yml',
'../runtime.yml',
'../ppyolo/_base_/ppyolo_r50vd_dcn.yml',
'../ppyolo/_base_/optimizer_1x.yml',
'./_base_/ppyolo_reader.yml',
]
snapshot_epoch: 8
use_ema: true
weights: output/ppyolo_r50vd_dcn_1x_sniper_visdrone/model_final
LearningRate:
base_lr: 0.005
schedulers:
- !PiecewiseDecay
gamma: 0.
milestones:
- 153
- 173
- !LinearWarmup
start_factor: 0.1
steps: 4000
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0005
type: L2
| PaddleDetection/configs/sniper/ppyolo_r50vd_dcn_1x_sniper_visdrone.yml/0 | {
"file_path": "PaddleDetection/configs/sniper/ppyolo_r50vd_dcn_1x_sniper_visdrone.yml",
"repo_id": "PaddleDetection",
"token_count": 275
} | 35 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'_base_/sparse_rcnn_r50_fpn.yml',
'_base_/optimizer_3x.yml',
'_base_/sparse_rcnn_reader.yml',
]
num_classes: 80
weights: output/sparse_rcnn_r50_fpn_3x_pro300_coco/model_final
snapshot_epoch: 1
SparseRCNNHead:
num_proposals: 300
SparsePostProcess:
num_proposals: 300
| PaddleDetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_3x_pro300_coco.yml/0 | {
"file_path": "PaddleDetection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_3x_pro300_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 169
} | 36 |
architecture: SSD
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/VGG16_caffe_pretrained.pdparams
# Model Architecture
SSD:
# model feat info flow
backbone: VGG
ssd_head: SSDHead
# post process
post_process: BBoxPostProcess
VGG:
depth: 16
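  # apply L2Norm with an initial scale of 20 to the first feature source (conv4_3); -1 disables normalization for the rest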
normalizations: [20., -1, -1, -1, -1, -1]
SSDHead:
anchor_generator:
steps: [8, 16, 32, 64, 100, 300]
aspect_ratios: [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]]
min_ratio: 20
max_ratio: 90
min_sizes: [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
max_sizes: [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]
offset: 0.5
flip: true
min_max_aspect_ratios_order: true
BBoxPostProcess:
decode:
name: SSDBox
nms:
name: MultiClassNMS
keep_top_k: 200
score_threshold: 0.01
nms_threshold: 0.45
nms_top_k: 400
nms_eta: 1.0
| PaddleDetection/configs/ssd/_base_/ssd_vgg16_300.yml/0 | {
"file_path": "PaddleDetection/configs/ssd/_base_/ssd_vgg16_300.yml",
"repo_id": "PaddleDetection",
"token_count": 410
} | 37 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'../ppyoloe/_base_/ppyoloe_plus_crn.yml',
'../ppyoloe/_base_/ppyoloe_plus_reader.yml',
]
depth_mult: 0.33 # s version
width_mult: 0.50
log_iter: 50
snapshot_epoch: 4
weights: output/ppyoloe_plus_swin_tiny_36e_coco/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/swin_tiny_patch4_window7_224_22kto1k_pretrained.pdparams
architecture: PPYOLOE
norm_type: sync_bn
use_ema: true
ema_decay: 0.9998
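# exclude the DFL projection conv from EMA averaging; keep reduce_mean in FP32 when training with AMP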
ema_black_list: ['proj_conv.weight']
custom_black_list: ['reduce_mean']
PPYOLOE:
backbone: SwinTransformer
neck: CustomCSPPAN
yolo_head: PPYOLOEHead
post_process: ~
SwinTransformer:
arch: 'swin_T_224' # ['swin_T_224', 'swin_S_224', 'swin_B_224', 'swin_L_224', 'swin_B_384', 'swin_L_384']
ape: false
drop_path_rate: 0.1
patch_norm: true
out_indices: [1, 2, 3]
PPYOLOEHead:
static_assigner_epoch: 12
nms:
nms_top_k: 1000
keep_top_k: 300
score_threshold: 0.01
nms_threshold: 0.7
TrainReader:
batch_size: 8
epoch: 36
LearningRate:
base_lr: 0.0001
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [24, 33]
- !LinearWarmup
start_factor: 0.1
steps: 1000
OptimizerBuilder:
clip_grad_by_norm: 1.0
optimizer:
type: AdamW
weight_decay: 0.05
param_groups:
- params: ['absolute_pos_embed', 'relative_position_bias_table', 'norm']
weight_decay: 0.0
| PaddleDetection/configs/swin/ppyoloe_plus_swin_tiny_36e_coco.yml/0 | {
"file_path": "PaddleDetection/configs/swin/ppyoloe_plus_swin_tiny_36e_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 663
} | 38 |
worker_num: 2
TrainReader:
sample_transforms:
- Decode: {}
- RandomFlip: {prob: 0.5}
- Resize: {interp: 1, target_size: [512, 512], keep_ratio: False}
- NormalizeImage: {mean: [123.675, 116.28, 103.53], std: [58.395, 57.12, 57.375], is_scale: false}
- Permute: {}
batch_transforms:
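  # encode ground-truth boxes into TTFNet heatmap/size targets at 1/4 of the input resolution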
- Gt2TTFTarget: {down_ratio: 4}
- PadBatch: {pad_to_stride: 32}
batch_size: 12
shuffle: true
drop_last: true
use_shared_memory: true
EvalReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 1, target_size: [512, 512], keep_ratio: False}
- NormalizeImage: {is_scale: false, mean: [123.675, 116.28, 103.53], std: [58.395, 57.12, 57.375]}
- Permute: {}
batch_size: 1
drop_last: false
TestReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 1, target_size: [512, 512], keep_ratio: False}
- NormalizeImage: {is_scale: false, mean: [123.675, 116.28, 103.53], std: [58.395, 57.12, 57.375]}
- Permute: {}
batch_size: 1
drop_last: false
| PaddleDetection/configs/ttfnet/_base_/ttfnet_reader.yml/0 | {
"file_path": "PaddleDetection/configs/ttfnet/_base_/ttfnet_reader.yml",
"repo_id": "PaddleDetection",
"token_count": 416
} | 39 |
# YOLOF (You Only Look One-level Feature)
## ModelZOO
| 网络 | 输入尺寸 | 图片数/GPU | Epochs | 模型推理耗时(ms) | mAP<sup>val<br>0.5:0.95 | Params(M) | FLOPs(G) | 下载链接 | 配置文件 |
| :--------------------- | :------- | :-------: | :----: | :----------: | :---------------------: | :----------------: |:---------: | :------: |:---------------: |
| YOLOF-R_50_C5 (paper) | 800x1333 | 4 | 12 | - | 37.7 | - | - | - | - |
| YOLOF-R_50_C5 | 800x1333 | 4 | 12 | - | 38.1 | 44.16 | 241.64 | [下载链接](https://paddledet.bj.bcebos.com/models/yolof_r50_c5_1x_coco.pdparams) | [配置文件](./yolof_r50_c5_1x_coco.yml) |
**注意:**
- YOLOF模型训练过程中默认使用8 GPUs进行混合精度训练,总batch_size默认为32。
## Citations
```
@inproceedings{chen2021you,
title={You Only Look One-level Feature},
author={Chen, Qiang and Wang, Yingming and Yang, Tong and Zhang, Xiangyu and Cheng, Jian and Sun, Jian},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
year={2021}
}
```
| PaddleDetection/configs/yolof/README.md/0 | {
"file_path": "PaddleDetection/configs/yolof/README.md",
"repo_id": "PaddleDetection",
"token_count": 623
} | 40 |
_BASE_: [
'../datasets/voc.yml',
'../runtime.yml',
'_base_/optimizer_270e.yml',
'_base_/yolov3_darknet53.yml',
'_base_/yolov3_reader.yml',
]
snapshot_epoch: 5
weights: output/yolov3_darknet53_270e_voc/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams
# set collate_batch to false because ground-truth info is needed
# on voc dataset and should not collate data in batch when batch size
# is larger than 1.
EvalReader:
collate_batch: false
# ### remove comment below and run evaluate again to get 56.1 COCO for mAP(0.5:0.95)
# metric: COCO
# EvalDataset:
# !COCODataSet
# image_dir: VOCdevkit/VOC2007/JPEGImages
# anno_path: voc_test.json
# dataset_dir: dataset/voc
# # wget https://bj.bcebos.com/v1/paddledet/data/voc.zip
| PaddleDetection/configs/yolov3/yolov3_darknet53_270e_voc.yml/0 | {
"file_path": "PaddleDetection/configs/yolov3/yolov3_darknet53_270e_voc.yml",
"repo_id": "PaddleDetection",
"token_count": 345
} | 41 |
worker_num: 4
TrainReader:
sample_transforms:
- Decode: {}
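    # Mosaic + MixUp augmentation; disabled after `mosaic_epoch` (see the bottom of TrainReader)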
- Mosaic:
prob: 1.0
input_dim: [640, 640]
degrees: [-10, 10]
scale: [0.1, 2.0]
shear: [-2, 2]
translate: [-0.1, 0.1]
enable_mixup: True
mixup_prob: 1.0
mixup_scale: [0.5, 1.5]
- AugmentHSV: {is_bgr: False, hgain: 5, sgain: 30, vgain: 30}
- PadResize: {target_size: 640}
- RandomFlip: {}
batch_transforms:
- Permute: {}
batch_size: 8
shuffle: True
drop_last: True
collate_batch: False
mosaic_epoch: 285
EvalReader:
sample_transforms:
- Decode: {}
- Resize: {target_size: [640, 640], keep_ratio: True, interp: 1}
- Pad: {size: [640, 640], fill_value: [114., 114., 114.]}
- Permute: {}
batch_size: 4
TestReader:
inputs_def:
image_shape: [3, 640, 640]
sample_transforms:
- Decode: {}
- Resize: {target_size: [640, 640], keep_ratio: True, interp: 1}
- Pad: {size: [640, 640], fill_value: [114., 114., 114.]}
- Permute: {}
batch_size: 1
| PaddleDetection/configs/yolox/_base_/yolox_reader.yml/0 | {
"file_path": "PaddleDetection/configs/yolox/_base_/yolox_reader.yml",
"repo_id": "PaddleDetection",
"token_count": 509
} | 42 |
# 推理Benchmark
## 一、环境准备
- 1、测试环境:
- CUDA 10.1
- CUDNN 7.6
- TensorRT-6.0.1
- PaddlePaddle v2.0.1
- GPU分别为: Tesla V100和GTX 1080Ti和Jetson AGX Xavier
- 2、测试方式:
- 为了方便比较不同模型的推理速度,输入采用同样大小的图片,为 3x640x640,采用 `demo/000000014439_640x640.jpg` 图片。
- Batch Size=1
- 去掉前100轮warmup时间,测试100轮的平均时间,单位ms/image,包括网络计算时间、数据拷贝至CPU的时间。
- 采用Fluid C++预测引擎: 包含Fluid C++预测、Fluid-TensorRT预测,下面同时测试了Float32 (FP32) 和Float16 (FP16)的推理速度。
**注意:** TensorRT中固定尺寸和动态尺寸区别请参考文档[TensorRT教程](TENSOR_RT.md)。由于固定尺寸下对两阶段模型支持不完善,所以faster rcnn模型采用动态尺寸测试。固定尺寸和动态尺寸支持融合的OP不完全一样,因此同一个模型在固定尺寸和动态尺寸下测试的性能可能会有一点差异。
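下面是一段示意性的计时脚本(仅为统计方式的示例:`run_once` 代表一次完整的前向推理调用,需替换为实际的预测函数,且其中应包含网络计算与数据拷贝至 CPU 的时间),用于说明"去掉前 100 轮 warmup、取后 100 轮平均,单位 ms/image"的测试方法:
```python
import time

def benchmark(run_once, warmup=100, repeats=100):
    """Average latency in ms/image, excluding the warmup iterations."""
    for _ in range(warmup):   # warmup rounds are not timed
        run_once()
    start = time.time()
    for _ in range(repeats):  # timed rounds
        run_once()
    return (time.time() - start) * 1000.0 / repeats
```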
## 二、推理速度
### 1、Linux系统
#### (1)Tesla V100
| 模型 | backbone | 是否固定尺寸 | 入网尺寸 | paddle_inference | trt_fp32 | trt_fp16 |
|-------------------------------|--------------|--------|----------|------------------|----------|----------|
| Faster RCNN FPN | ResNet50 | 否 | 640x640 | 27.99 | 26.15 | 21.92 |
| Faster RCNN FPN | ResNet50 | 否 | 800x1312 | 32.49 | 25.54 | 21.70 |
| YOLOv3 | Mobilenet\_v1 | 是 | 608x608 | 9.74 | 8.61 | 6.28 |
| YOLOv3 | Darknet53 | 是 | 608x608 | 17.84 | 15.43 | 9.86 |
| PPYOLO | ResNet50 | 是 | 608x608 | 20.77 | 18.40 | 13.53 |
| SSD | Mobilenet\_v1 | 是 | 300x300 | 5.17 | 4.43 | 4.29 |
| TTFNet | Darknet53 | 是 | 512x512 | 10.14 | 8.71 | 5.55 |
| FCOS | ResNet50 | 是 | 640x640 | 35.47 | 35.02 | 34.24 |
#### (2)Jetson AGX Xavier
| 模型 | backbone | 是否固定尺寸 | 入网尺寸 | paddle_inference | trt_fp32 | trt_fp16 |
|-------------------------------|--------------|--------|----------|------------------|----------|----------|
| Faster RCNN FPN | ResNet50 | 否 | 640x640 | 169.45 | 158.92 | 119.25 |
| Faster RCNN FPN | ResNet50 | 否 | 800x1312 | 228.07 | 156.39 | 117.03 |
| YOLOv3 | Mobilenet\_v1 | 是 | 608x608 | 48.76 | 43.83 | 18.41 |
| YOLOv3 | Darknet53 | 是 | 608x608 | 121.61 | 110.30 | 42.38 |
| PPYOLO | ResNet50 | 是 | 608x608 | 111.80 | 99.40 | 48.05 |
| SSD | Mobilenet\_v1 | 是 | 300x300 | 10.52 | 8.84 | 8.77 |
| TTFNet | Darknet53 | 是 | 512x512 | 73.77 | 64.03 | 31.46 |
| FCOS | ResNet50 | 是 | 640x640 | 217.11 | 214.38 | 205.78 |
### 2、Windows系统
#### (1)GTX 1080Ti
| 模型 | backbone | 是否固定尺寸 | 入网尺寸 | paddle_inference | trt_fp32 | trt_fp16 |
|-------------------------------|--------------|--------|----------|------------------|----------|----------|
| Faster RCNN FPN | ResNet50 | 否 | 640x640 | 50.74 | 57.17 | 62.08 |
| Faster RCNN FPN | ResNet50 | 否 | 800x1312 | 50.31 | 57.61 | 62.05 |
| YOLOv3 | Mobilenet\_v1 | 是 | 608x608 | 14.51 | 11.23 | 11.13 |
| YOLOv3 | Darknet53 | 是 | 608x608 | 30.26 | 23.92 | 24.02 |
| PPYOLO | ResNet50 | 是 | 608x608 | 38.06 | 31.40 | 31.94 |
| SSD | Mobilenet\_v1 | 是 | 300x300 | 16.47 | 13.87 | 13.76 |
| TTFNet | Darknet53 | 是 | 512x512 | 21.83 | 17.14 | 17.09 |
| FCOS | ResNet50 | 是 | 640x640 | 71.88 | 69.93 | 69.52 |
| PaddleDetection/deploy/BENCHMARK_INFER.md/0 | {
"file_path": "PaddleDetection/deploy/BENCHMARK_INFER.md",
"repo_id": "PaddleDetection",
"token_count": 2713
} | 43 |
metric: COCO
num_classes: 80
# Dataset configuration
TrainDataset:
!COCODataSet
image_dir: train2017
anno_path: annotations/instances_train2017.json
dataset_dir: dataset/coco/
EvalDataset:
!COCODataSet
image_dir: val2017
anno_path: annotations/instances_val2017.json
dataset_dir: dataset/coco/
worker_num: 0
# preprocess reader in test
EvalReader:
sample_transforms:
- Decode: {}
- Resize: {target_size: [640, 640], keep_ratio: True, interp: 1}
- Pad: {size: [640, 640], fill_value: [114., 114., 114.]}
- NormalizeImage: {mean: [0., 0., 0.], std: [1., 1., 1.], norm_type: none}
- Permute: {}
batch_size: 4
| PaddleDetection/deploy/auto_compression/configs/yolov8_reader.yml/0 | {
"file_path": "PaddleDetection/deploy/auto_compression/configs/yolov8_reader.yml",
"repo_id": "PaddleDetection",
"token_count": 278
} | 44 |
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <ctime>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "paddle_inference_api.h" // NOLINT
#include "include/config_parser.h"
#include "include/preprocess_op.h"
#include "include/tracker.h"
using namespace paddle_infer;
namespace PaddleDetection {
// JDE Detection Result
struct MOT_Rect {
float left;
float top;
float right;
float bottom;
};
struct MOT_Track {
int ids;
float score;
MOT_Rect rects;
};
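// Tracking output of one frame: one MOT_Track per tracked target.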
typedef std::vector<MOT_Track> MOT_Result;
// Generate visualization color
cv::Scalar GetColor(int idx);
// Visualize tracking result
cv::Mat VisualizeTrackResult(const cv::Mat& img,
const MOT_Result& results,
const float fps,
const int frame_id);
class JDEDetector {
public:
explicit JDEDetector(const std::string& model_dir,
const std::string& device = "CPU",
bool use_mkldnn = false,
int cpu_threads = 1,
const std::string& run_mode = "paddle",
const int batch_size = 1,
const int gpu_id = 0,
const int trt_min_shape = 1,
const int trt_max_shape = 1280,
const int trt_opt_shape = 640,
bool trt_calib_mode = false,
const int min_box_area = 200) {
this->device_ = device;
this->gpu_id_ = gpu_id;
this->cpu_math_library_num_threads_ = cpu_threads;
this->use_mkldnn_ = use_mkldnn;
this->trt_min_shape_ = trt_min_shape;
this->trt_max_shape_ = trt_max_shape;
this->trt_opt_shape_ = trt_opt_shape;
this->trt_calib_mode_ = trt_calib_mode;
config_.load_config(model_dir);
this->use_dynamic_shape_ = config_.use_dynamic_shape_;
this->min_subgraph_size_ = config_.min_subgraph_size_;
threshold_ = config_.draw_threshold_;
preprocessor_.Init(config_.preprocess_info_);
LoadModel(model_dir, batch_size, run_mode);
this->min_box_area_ = min_box_area;
this->conf_thresh_ = config_.conf_thresh_;
}
// Load Paddle inference model
void LoadModel(const std::string& model_dir,
const int batch_size = 1,
const std::string& run_mode = "paddle");
// Run predictor
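  // `imgs` holds the input frames; `result` receives the tracked boxes with ids,
  // and `times` receives [preprocess, inference, postprocess] time in ms.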
void Predict(const std::vector<cv::Mat> imgs,
const double threshold = 0.5,
const int warmup = 0,
const int repeats = 1,
MOT_Result* result = nullptr,
std::vector<double>* times = nullptr);
private:
std::string device_ = "CPU";
int gpu_id_ = 0;
int cpu_math_library_num_threads_ = 1;
bool use_mkldnn_ = false;
int min_subgraph_size_ = 3;
bool use_dynamic_shape_ = false;
int trt_min_shape_ = 1;
int trt_max_shape_ = 1280;
int trt_opt_shape_ = 640;
bool trt_calib_mode_ = false;
// Preprocess image and copy data to input buffer
void Preprocess(const cv::Mat& image_mat);
// Postprocess result
void Postprocess(const cv::Mat dets, const cv::Mat emb, MOT_Result* result);
std::shared_ptr<Predictor> predictor_;
Preprocessor preprocessor_;
ImageBlob inputs_;
std::vector<float> bbox_data_;
std::vector<float> emb_data_;
float threshold_;
ConfigPaser config_;
float min_box_area_;
float conf_thresh_;
};
} // namespace PaddleDetection
| PaddleDetection/deploy/cpp/include/jde_detector.h/0 | {
"file_path": "PaddleDetection/deploy/cpp/include/jde_detector.h",
"repo_id": "PaddleDetection",
"token_count": 1772
} | 45 |
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <glog/logging.h>
#include <math.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <algorithm>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>
#ifdef _WIN32
#include <direct.h>
#include <io.h>
#elif LINUX
#include <stdarg.h>
#include <sys/stat.h>
#endif
#include <gflags/gflags.h>
#include <opencv2/opencv.hpp>
#include "include/jde_detector.h"
#include "include/object_detector.h"
DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_int32(batch_size, 1, "batch_size");
DEFINE_string(
video_file,
"",
"Path of input video, `video_file` or `camera_id` has a highest priority.");
DEFINE_int32(camera_id, -1, "Device id of camera to predict");
DEFINE_bool(
use_gpu,
false,
"Deprecated, please use `--device` to set the device you want to run.");
DEFINE_string(device,
"CPU",
"Choose the device you want to run, it can be: CPU/GPU/XPU, "
"default is CPU.");
DEFINE_double(threshold, 0.5, "Threshold of score.");
DEFINE_string(output_dir, "output", "Directory of output visualization files.");
DEFINE_string(run_mode,
"paddle",
"Mode of running(paddle/trt_fp32/trt_fp16/trt_int8)");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute");
DEFINE_bool(run_benchmark,
false,
"Whether to predict a image_file repeatedly for benchmark");
DEFINE_bool(use_mkldnn, false, "Whether use mkldnn with CPU");
DEFINE_int32(cpu_threads, 1, "Num of threads with CPU");
DEFINE_int32(trt_min_shape, 1, "Min shape of TRT DynamicShapeI");
DEFINE_int32(trt_max_shape, 1280, "Max shape of TRT DynamicShapeI");
DEFINE_int32(trt_opt_shape, 640, "Opt shape of TRT DynamicShapeI");
DEFINE_bool(trt_calib_mode,
false,
"If the model is produced by TRT offline quantitative calibration, "
"trt_calib_mode need to set True");
void PrintBenchmarkLog(std::vector<double> det_time, int img_num) {
LOG(INFO) << "----------------------- Config info -----------------------";
LOG(INFO) << "runtime_device: " << FLAGS_device;
LOG(INFO) << "ir_optim: "
<< "True";
LOG(INFO) << "enable_memory_optim: "
<< "True";
  auto has_trt = FLAGS_run_mode.find("trt");
  if (has_trt != std::string::npos) {
LOG(INFO) << "enable_tensorrt: "
<< "True";
std::string precision = FLAGS_run_mode.substr(4, 8);
LOG(INFO) << "precision: " << precision;
} else {
LOG(INFO) << "enable_tensorrt: "
<< "False";
LOG(INFO) << "precision: "
<< "fp32";
}
LOG(INFO) << "enable_mkldnn: " << (FLAGS_use_mkldnn ? "True" : "False");
LOG(INFO) << "cpu_math_library_num_threads: " << FLAGS_cpu_threads;
LOG(INFO) << "----------------------- Data info -----------------------";
LOG(INFO) << "batch_size: " << FLAGS_batch_size;
LOG(INFO) << "input_shape: "
<< "dynamic shape";
LOG(INFO) << "----------------------- Model info -----------------------";
FLAGS_model_dir.erase(FLAGS_model_dir.find_last_not_of("/") + 1);
LOG(INFO) << "model_name: "
<< FLAGS_model_dir.substr(FLAGS_model_dir.find_last_of('/') + 1);
LOG(INFO) << "----------------------- Perf info ------------------------";
LOG(INFO) << "Total number of predicted data: " << img_num
<< " and total time spent(ms): "
            << std::accumulate(det_time.begin(), det_time.end(), 0.);
LOG(INFO) << "preproce_time(ms): " << det_time[0] / img_num
<< ", inference_time(ms): " << det_time[1] / img_num
<< ", postprocess_time(ms): " << det_time[2] / img_num;
}
static std::string DirName(const std::string& filepath) {
auto pos = filepath.rfind(OS_PATH_SEP);
if (pos == std::string::npos) {
return "";
}
return filepath.substr(0, pos);
}
static bool PathExists(const std::string& path) {
#ifdef _WIN32
struct _stat buffer;
return (_stat(path.c_str(), &buffer) == 0);
#else
struct stat buffer;
return (stat(path.c_str(), &buffer) == 0);
#endif // !_WIN32
}
static void MkDir(const std::string& path) {
if (PathExists(path)) return;
int ret = 0;
#ifdef _WIN32
ret = _mkdir(path.c_str());
#else
ret = mkdir(path.c_str(), 0755);
#endif // !_WIN32
if (ret != 0) {
std::string path_error(path);
path_error += " mkdir failed!";
throw std::runtime_error(path_error);
}
}
static void MkDirs(const std::string& path) {
if (path.empty()) return;
if (PathExists(path)) return;
MkDirs(DirName(path));
MkDir(path);
}
void PredictVideo(const std::string& video_path,
PaddleDetection::JDEDetector* mot,
const std::string& output_dir = "output") {
// Open video
cv::VideoCapture capture;
std::string video_out_name = "output.mp4";
if (FLAGS_camera_id != -1) {
capture.open(FLAGS_camera_id);
} else {
capture.open(video_path.c_str());
video_out_name =
video_path.substr(video_path.find_last_of(OS_PATH_SEP) + 1);
}
if (!capture.isOpened()) {
printf("can not open video : %s\n", video_path.c_str());
return;
}
// Get Video info : resolution, fps, frame count
int video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
int video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
int video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));
int video_frame_count =
static_cast<int>(capture.get(CV_CAP_PROP_FRAME_COUNT));
printf("fps: %d, frame_count: %d\n", video_fps, video_frame_count);
// Create VideoWriter for output
cv::VideoWriter video_out;
std::string video_out_path(output_dir);
if (output_dir.rfind(OS_PATH_SEP) != output_dir.size() - 1) {
video_out_path += OS_PATH_SEP;
}
video_out_path += video_out_name;
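  // 0x00000021 is a numeric fourcc commonly used with OpenCV's FFmpeg backend to write .mp4 files.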
video_out.open(video_out_path.c_str(),
0x00000021,
video_fps,
cv::Size(video_width, video_height),
true);
if (!video_out.isOpened()) {
printf("create video writer failed!\n");
return;
}
PaddleDetection::MOT_Result result;
std::vector<double> det_times(3);
double times;
// Capture all frames and do inference
cv::Mat frame;
int frame_id = 1;
while (capture.read(frame)) {
if (frame.empty()) {
break;
}
std::vector<cv::Mat> imgs;
imgs.push_back(frame);
printf("detect frame: %d\n", frame_id);
mot->Predict(imgs, FLAGS_threshold, 0, 1, &result, &det_times);
frame_id += 1;
    times = std::accumulate(det_times.begin(), det_times.end(), 0.) / frame_id;
cv::Mat out_im = PaddleDetection::VisualizeTrackResult(
frame, result, 1000. / times, frame_id);
video_out.write(out_im);
}
capture.release();
video_out.release();
PrintBenchmarkLog(det_times, frame_id);
printf("Visualized output saved as %s\n", video_out_path.c_str());
}
int main(int argc, char** argv) {
// Parsing command-line
google::ParseCommandLineFlags(&argc, &argv, true);
if (FLAGS_model_dir.empty() || FLAGS_video_file.empty()) {
std::cout << "Usage: ./main --model_dir=/PATH/TO/INFERENCE_MODEL/ "
<< "--video_file=/PATH/TO/INPUT/VIDEO/" << std::endl;
return -1;
}
if (!(FLAGS_run_mode == "paddle" || FLAGS_run_mode == "trt_fp32" ||
FLAGS_run_mode == "trt_fp16" || FLAGS_run_mode == "trt_int8")) {
std::cout
<< "run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or 'trt_int8'.";
return -1;
}
transform(FLAGS_device.begin(),
FLAGS_device.end(),
FLAGS_device.begin(),
::toupper);
if (!(FLAGS_device == "CPU" || FLAGS_device == "GPU" ||
FLAGS_device == "XPU")) {
std::cout << "device should be 'CPU', 'GPU' or 'XPU'.";
return -1;
}
if (FLAGS_use_gpu) {
std::cout << "Deprecated, please use `--device` to set the device you want "
"to run.";
return -1;
}
// Do inference on input video or image
PaddleDetection::JDEDetector mot(FLAGS_model_dir,
FLAGS_device,
FLAGS_use_mkldnn,
FLAGS_cpu_threads,
FLAGS_run_mode,
FLAGS_batch_size,
FLAGS_gpu_id,
FLAGS_trt_min_shape,
FLAGS_trt_max_shape,
FLAGS_trt_opt_shape,
FLAGS_trt_calib_mode);
if (!PathExists(FLAGS_output_dir)) {
MkDirs(FLAGS_output_dir);
}
PredictVideo(FLAGS_video_file, &mot, FLAGS_output_dir);
return 0;
}
| PaddleDetection/deploy/cpp/src/main_jde.cc/0 | {
"file_path": "PaddleDetection/deploy/cpp/src/main_jde.cc",
"repo_id": "PaddleDetection",
"token_count": 4064
} | 46 |
{
"model_dir_det": "./model_det/",
"batch_size_det": 1,
"threshold_det": 0.5,
"model_dir_keypoint": "./model_keypoint/",
"batch_size_keypoint": 8,
"threshold_keypoint": 0.5,
"image_file": "./demo.jpg",
"image_dir": "",
"run_benchmark": false,
"cpu_threads": 4,
"use_dark_decode": true
}
| PaddleDetection/deploy/lite/keypoint_runtime_config.json/0 | {
"file_path": "PaddleDetection/deploy/lite/keypoint_runtime_config.json",
"repo_id": "PaddleDetection",
"token_count": 140
} | 47 |
crop_thresh: 0.5
visual: True
warmup_frame: 50
DET:
model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip
batch_size: 1
MOT:
model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip
tracker_config: deploy/pipeline/config/tracker_config.yml
batch_size: 1
enable: True
VEHICLE_ATTR:
model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/vehicle_attribute_model.zip
batch_size: 8
color_threshold: 0.5
type_threshold: 0.5
enable: True
| PaddleDetection/deploy/pipeline/config/examples/infer_cfg_vehicle_attr.yml/0 | {
"file_path": "PaddleDetection/deploy/pipeline/config/examples/infer_cfg_vehicle_attr.yml",
"repo_id": "PaddleDetection",
"token_count": 241
} | 48 |
[English](pphuman_attribute_en.md) | Simplified Chinese
# PP-Human Attribute Recognition Module
Pedestrian attribute recognition is widely used in smart communities, industrial inspection, traffic monitoring and similar scenarios. PP-Human integrates an attribute recognition module covering gender, age, hat, glasses, upper/lower clothing style and more. Pretrained models are provided and can be downloaded and used directly.
| Task | Algorithm | Accuracy | Inference Speed (ms) | Download |
|:---------------------|:---------:|:------:|:------:| :---------------------------------------------------------------------------------: |
| Pedestrian detection/tracking | PP-YOLOE | mAP: 56.3 <br> MOTA: 72.0 | Detection: 16.2ms <br> Tracking: 22.3ms |[Download](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip) |
| High-accuracy attribute model | PP-HGNet_small | mA: 95.4 | 1.54ms per person | [Download](https://bj.bcebos.com/v1/paddledet/models/pipeline/PPHGNet_small_person_attribute_954_infer.zip) |
| Lightweight attribute model | PP-LCNet_x1_0 | mA: 94.5 | 0.54ms per person | [Download](https://bj.bcebos.com/v1/paddledet/models/pipeline/PPLCNet_x1_0_person_attribute_945_infer.zip) |
| Accuracy/speed balanced attribute model | PP-HGNet_tiny | mA: 95.2 | 1.14ms per person | [Download](https://bj.bcebos.com/v1/paddledet/models/pipeline/PPHGNet_tiny_person_attribute_952_infer.zip) |
1. The detection/tracking accuracy is obtained by training and testing on a mix of [MOT17](https://motchallenge.net/), [CrowdHuman](http://www.crowdhuman.org/), [HIEVE](http://humaninevents.org/) and some internal business data.
2. The attribute recognition accuracy is obtained by training and testing on a mix of [PA100k](https://github.com/xh-liu/HydraPlus-Net#pa-100k-dataset), [RAPv2](http://www.rapdataset.com/rapv2.html), [PETA](http://mmlab.ie.cuhk.edu.hk/projects/PETA.html) and some business data.
3. Inference speed is measured on a V100 with TensorRT FP16 and covers model inference only.
4. The attribute module relies on tracking results; download a tracking model from the [tracking model page](./pphuman_mot.md) and choose the high-accuracy or lightweight version as needed.
5. After downloading, extract the models under the PaddleDetection/output_inference/ directory.
## Usage
1. Download the models from the links in the table above and extract them to ```PaddleDetection/output_inference```, then update the model paths in the config file (by default, models can also be downloaded automatically). Set `enable: True` under `ATTR` in ```deploy/pipeline/config/infer_cfg_pphuman.yml```.
Description of the `ATTR` options in `infer_cfg_pphuman.yml`:
```
ATTR:                                                                  # module name
  model_dir: output_inference/PPLCNet_x1_0_person_attribute_945_infer/ # model path
  batch_size: 8                                                        # maximum inference batch size
  enable: False                                                        # whether the module is enabled
```
2. For image input, run the commands below (see [Quick Start - Parameter Description](./PPHuman_QUICK_STARTED.md#41-参数说明) for more command-line options).
```python
# single image
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml \
                                   --image_file=test_image.jpg \
                                   --device=gpu \
# image directory
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml \
                                   --image_dir=images/ \
                                   --device=gpu \
```
3. For video input, run:
```python
# single video file
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml \
                                   --video_file=test_video.mp4 \
                                   --device=gpu \
# video directory
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml \
                                   --video_dir=test_videos/ \
                                   --device=gpu \
```
4. To change the model path, either:
   - Option 1: edit ```./deploy/pipeline/config/infer_cfg_pphuman.yml``` and set the attribute model path under the `ATTR` section;
   - Option 2: append `-o ATTR.model_dir` right after `--config` on the command line:
```python
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml
                                   -o ATTR.model_dir=output_inference/PPLCNet_x1_0_person_attribute_945_infer/\
                                   --video_file=test_video.mp4 \
                                   --device=gpu
```
The result looks like this:
<div width="600" align="center">
  <img src="https://user-images.githubusercontent.com/22989727/205597518-7a602bd5-e643-44a1-a4ca-03c9ffecd918.gif"/>
</div>
Data source and copyright: Tianfu Technology. Thanks for providing and open-sourcing real-scene data, for academic research use only.
## Solution Description
1. Object detection / multi-object tracking produces pedestrian bounding boxes from the image/video input. The model is PP-YOLOE; see [PP-YOLOE](../../../configs/ppyoloe/README_cn.md) for details.
2. Each pedestrian is cropped from the input image using the detection box coordinates.
3. The attribute model analyzes the attributes of each crop; the attribute set is the same as the PA100k dataset:
```
- Gender: male, female
- Age: under 18, 18-60, over 60
- Orientation: front, back, side
- Accessories: glasses, hat, none
- Holding object in front: yes, no
- Bag: backpack, shoulder bag, handbag
- Upper clothing style: striped, with logo, plaid, patchwork
- Lower clothing style: striped, patterned
- Short-sleeved top: yes, no
- Long-sleeved top: yes, no
- Long coat: yes, no
- Trousers: yes, no
- Shorts: yes, no
- Skirt or dress: yes, no
- Boots: yes, no
```
4. The attribute recognition model follows [StrongBaseline](https://arxiv.org/pdf/2107.03576.pdf), with the network changed to a multi-label classification structure based on PP-HGNet and PP-LCNet, and a weighted BCE loss to improve accuracy. A hedged sketch of steps 2-3 is shown below.
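The crop-and-classify flow of steps 2-3 can be summarized by the minimal sketch below. It is illustrative only and assumes a generic `attr_predictor` callable, an input size of 192x256 and a hypothetical `ATTR_LABELS` list; it is not the actual PP-Human pipeline code.
```python
import cv2
import numpy as np

# Hypothetical subset of the PA100k-style label names; the real list is defined by the trained model.
ATTR_LABELS = ["Male", "AgeLess18", "Age18-60", "AgeOver60", "Front", "Back", "Side"]

def crop_and_classify(frame, boxes, attr_predictor, threshold=0.5, batch_size=8):
    """Crop each pedestrian box from the frame and run the multi-label attribute classifier.

    `attr_predictor` is assumed to map a batch of resized crops to per-attribute sigmoid scores.
    Degenerate boxes are skipped, so results align with the kept crops only.
    """
    h, w = frame.shape[:2]
    crops = []
    for x1, y1, x2, y2 in np.asarray(boxes).astype(int):
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(w - 1, x2), min(h - 1, y2)
        if x2 <= x1 or y2 <= y1:
            continue
        crops.append(cv2.resize(frame[y1:y2, x1:x2], (192, 256)))  # model input size assumed

    results = []
    for i in range(0, len(crops), batch_size):
        scores = attr_predictor(np.stack(crops[i:i + batch_size]))  # [N, num_attrs]
        for row in scores:
            keep = [ATTR_LABELS[j] for j, s in enumerate(row[:len(ATTR_LABELS)]) if s > threshold]
            results.append(keep)
    return results
```
In the real pipeline the crops are additionally normalized, and some attribute groups (age, for example) are typically decided by an argmax over their bins rather than a fixed threshold.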
## References
```
@article{jia2020rethinking,
title={Rethinking of pedestrian attribute recognition: Realistic datasets with efficient method},
author={Jia, Jian and Huang, Houjing and Yang, Wenjie and Chen, Xiaotang and Huang, Kaiqi},
journal={arXiv preprint arXiv:2005.11909},
year={2020}
}
```
| PaddleDetection/deploy/pipeline/docs/tutorials/pphuman_attribute.md/0 | {
"file_path": "PaddleDetection/deploy/pipeline/docs/tutorials/pphuman_attribute.md",
"repo_id": "PaddleDetection",
"token_count": 4155
} | 49 |
[English](ppvehicle_retrograde_en.md) | Simplified Chinese
# PP-Vehicle Wrong-Way Driving Recognition Module
Wrong-way (retrograde) driving recognition is widely used in smart city and intelligent transportation scenarios. PP-Vehicle integrates a wrong-way driving recognition module that detects whether a vehicle is driving against the traffic direction.
| Task | Algorithm | Accuracy | Inference Speed | Download |
|-----------|------|-----------|----------|---------------|
| Vehicle detection/tracking | PP-YOLOE | mAP 63.9 | 38.67ms | [Inference model](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip) |
| Lane line segmentation | PP-liteseg | mIoU 32.69 | 47 ms | [Inference model](https://bj.bcebos.com/v1/paddledet/models/pipeline/pp_lite_stdc2_bdd100k.zip) |
Notes:
1. The vehicle detection/tracking speed is measured on an NVIDIA T4 with TensorRT FP16 enabled and includes data preprocessing, model inference and postprocessing.
2. The vehicle detection/tracking model is trained and evaluated on the [VeRi dataset](https://www.v7labs.com/open-datasets/veri-dataset).
3. The lane line model speed is measured on a Tesla P40 with Python inference and includes data preprocessing, model inference and postprocessing.
4. The lane line model is trained and evaluated on [BDD100K-LaneSeg](https://bdd-data.berkeley.edu/portal.html#download.) and [Apollo Scape](http://apolloscape.auto/lane_segmentation.html#to_dataset_href). The label files of the two datasets: [Lane_dataset_label](https://bj.bcebos.com/v1/paddledet/data/mot/bdd100k/lane_dataset_label.zip)
## Usage
### Configuration
The parameters related to wrong-way driving recognition in the [config file](../../config/infer_cfg_ppvehicle.yml) are:
```
LANE_SEG:
  lane_seg_config: deploy/pipeline/config/lane_seg_config.yml #lane segmentation config file
  model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/pp_lite_stdc2_bdd100k.zip #model path
VEHICLE_RETROGRADE:
  frame_len: 8                        #number of sampled frames
  sample_freq: 7                      #sampling frequency
  enable: True                        #enable wrong-way driving recognition
  filter_horizontal_flag: False       #whether to filter out vehicles moving horizontally
  keep_right_flag: True               #assumes keep-right traffic; set to False if vehicles drive on the left
  deviation: 23                       #angle threshold for filtering horizontal vehicles; filtered if the angle is larger
  move_scale: 0.01                    #threshold for filtering stationary vehicles; a vehicle is considered moving if it moves
                                      #more than image_diagonal*move_scale pixels, otherwise it is treated as stationary
  fence_line: []                      #lane center line coordinates in the format [x1,y1,x2,y2] with y2>y1; if empty, it is
                                      #determined automatically from the traffic direction
```
The lane-extraction parameters in the [lane line config file](../../config/lane_seg_config.yml) are:
```
type: PLSLaneseg  #selected segmentation model
PLSLaneseg:
  batch_size: 1                        #image batch size
  device: gpu                          #gpu or cpu
  filter_flag: True                    #whether to filter out horizontal lane lines
  horizontal_filtration_degree: 23     #threshold for filtering horizontal lane lines; no filtering is done when the difference
                                       #between the largest and smallest lane inclination is below this value
  horizontal_filtering_threshold: 0.25 #threshold separating the vertical and horizontal directions:
                                       #thr = (min_degree+max_degree)*0.25
                                       #lane lines are split into vertical/horizontal by comparing their inclination with thr
```
### Commands
1. Download the `vehicle detection/tracking` and `lane line segmentation` inference models from the model zoo and extract them to `./output_inference`. Models are downloaded automatically by default; if you download them manually, change the model directory to where the models are stored.
2. Set `enable: True` under `VEHICLE_RETROGRADE` in the config file to enable this feature.
3. Wrong-way driving recognition requires video input; run:
```bash
# predict a single video file
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
                                   -o VEHICLE_RETROGRADE.enable=true \
                                   --video_file=test_video.mp4 \
                                   --device=gpu
# predict a folder containing one or more videos
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
                                   -o VEHICLE_RETROGRADE.enable=true \
                                   --video_dir=test_video \
                                   --device=gpu
```
4. To change the model path, either:
   - Option 1: edit `./deploy/pipeline/config/infer_cfg_ppvehicle.yml` and set the lane line model path under the `LANE_SEG` section;
   - Option 2: add `-o` on the command line to override the default model path in the config file:
```bash
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
                                   --video_file=test_video.mp4 \
                                   --device=gpu \
                                   -o LANE_SEG.model_dir=output_inference/
                                   VEHICLE_RETROGRADE.enable=true
```
The result looks like this:
<div width="1000" align="center">
  <img src="https://raw.githubusercontent.com/LokeZhou/PaddleDetection/develop/deploy/pipeline/docs/images/vehicle_retrograde.gif"/>
</div>
**Notes:**
 - The lane center line is determined automatically only when the sampled video segment contains two vehicles moving in opposite directions; once determined it is fixed and no longer updated;
 - Due to the camera angle and the 2D viewpoint, the automatically determined center line may be inaccurate;
 - The center line coordinates can also be entered manually in the config file; see the [vehicle violation config](../../config/examples/infer_cfg_vehicle_violation.yml).
## Solution Description
1. Within the sampled video segment, a vehicle is judged as driving the wrong way from the position of the lane center line and the vehicle trajectory (a hedged sketch of this check follows this list); the decision flow is:
<div width="1000" align="center">
  <img src="https://raw.githubusercontent.com/LokeZhou/PaddleDetection/develop/deploy/pipeline/docs/images/vehicle_retrograde.png"/>
</div>
2. The lane line model uses the ultra-lightweight segmentation solution from [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg). The training [labels](https://bj.bcebos.com/v1/paddledet/data/mot/bdd100k/lane_dataset_label.zip) have 4 classes:
  0  background
  1  double yellow line
  2  solid line
  3  dashed line
Dashed lines are ignored in the wrong-way analysis;
3. Lane lines are obtained by clustering the segmentation result, and horizontal lane lines are filtered out by default; to keep them, change the `filter_flag` parameter in the [lane line config file](../../config/lane_seg_config.yml);
4. Vehicles moving horizontally are filtered out by default; to keep them, change the `filter_horizontal_flag` parameter in the [config file](../../config/infer_cfg_ppvehicle.yml);
5. Wrong-way driving is judged under the keep-right rule by default; to change this, modify the `keep_right_flag` parameter in the [config file](../../config/infer_cfg_ppvehicle.yml);
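A hedged sketch of the per-track direction check from step 1 is given below. It is a simplified illustration, not the actual PP-Vehicle implementation: the trajectory format, the pixel threshold and the sign-to-side mapping of the center line are assumptions and must be calibrated per camera.
```python
import numpy as np

def side_of_line(point, line):
    # Sign of the 2D cross product: which side of the directed center line the point lies on.
    x1, y1, x2, y2 = line
    return np.sign((x2 - x1) * (point[1] - y1) - (y2 - y1) * (point[0] - x1))

def is_retrograde(track_centers, fence_line, keep_right=True, min_move_px=5.0):
    """Judge wrong-way driving from sampled box centers of one track.

    track_centers: list of (x, y) centers over the sampled frames (image coordinates, y grows downward).
    fence_line: [x1, y1, x2, y2] lane center line with y2 > y1.
    """
    if len(track_centers) < 2:
        return False
    start = np.asarray(track_centers[0], dtype=float)
    end = np.asarray(track_centers[-1], dtype=float)
    if np.linalg.norm(end - start) < min_move_px:  # treat as a stationary vehicle
        return False
    moving_down = end[1] > start[1]           # moving towards the bottom of the frame
    side = side_of_line(end, fence_line)      # +1 / -1 for the two sides of the center line
    # Under the keep-right rule, traffic on one side of the center line is expected to move
    # down the frame and traffic on the other side to move up; which sign corresponds to
    # which physical side depends on the camera setup and must be calibrated.
    expected_down = (side > 0) if keep_right else (side < 0)
    return moving_down != expected_down
```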
**Performance tuning tips**:
1. Depending on the camera viewpoint, decide whether horizontal lane lines and horizontally moving vehicles should be filtered out;
2. The lane center line can also be entered manually.
| PaddleDetection/deploy/pipeline/docs/tutorials/ppvehicle_retrograde.md/0 | {
"file_path": "PaddleDetection/deploy/pipeline/docs/tutorials/ppvehicle_retrograde.md",
"repo_id": "PaddleDetection",
"token_count": 4855
} | 50 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pptracking.python.mot.visualize import plot_tracking
from python.visualize import visualize_attr
import os
import re
import cv2
import gc
import numpy as np
try:
from sklearn import preprocessing
from sklearn.cluster import AgglomerativeClustering
except:
print(
        'Warning: Unable to use MTMCT in PP-Human, please install scikit-learn, for example: `pip install scikit-learn`'
)
pass
import pandas as pd
from tqdm import tqdm
from functools import reduce
import warnings
warnings.filterwarnings("ignore")
def gen_restxt(output_dir_filename, map_tid, cid_tid_dict):
    pattern = re.compile(r'c(\d+)_t(\d+)')
f_w = open(output_dir_filename, 'w')
for key, res in cid_tid_dict.items():
cid, tid = pattern.search(key).groups()
cid = int(cid) + 1
rects = res["rects"]
frames = res["frames"]
for idx, bbox in enumerate(rects):
bbox[0][3:] -= bbox[0][1:3]
fid = frames[idx] + 1
rect = [max(int(x), 0) for x in bbox[0][1:]]
if key in map_tid:
new_tid = map_tid[key]
f_w.write(
str(cid) + ' ' + str(new_tid) + ' ' + str(fid) + ' ' +
' '.join(map(str, rect)) + '\n')
print('gen_res: write file in {}'.format(output_dir_filename))
f_w.close()
def get_mtmct_matching_results(pred_mtmct_file, secs_interval=0.5,
video_fps=20):
res = np.loadtxt(pred_mtmct_file) # 'cid, tid, fid, x1, y1, w, h, -1, -1'
camera_ids = list(map(int, np.unique(res[:, 0])))
res = res[:, :7]
# each line in res: 'cid, tid, fid, x1, y1, w, h'
camera_tids = []
camera_results = dict()
for c_id in camera_ids:
camera_results[c_id] = res[res[:, 0] == c_id]
tids = np.unique(camera_results[c_id][:, 1])
tids = list(map(int, tids))
camera_tids.append(tids)
# select common tids throughout each video
common_tids = reduce(np.intersect1d, camera_tids)
# get mtmct matching results by cid_tid_fid_results[c_id][t_id][f_id]
cid_tid_fid_results = dict()
cid_tid_to_fids = dict()
interval = int(secs_interval * video_fps) # preferably less than 10
for c_id in camera_ids:
cid_tid_fid_results[c_id] = dict()
cid_tid_to_fids[c_id] = dict()
for t_id in common_tids:
tid_mask = camera_results[c_id][:, 1] == t_id
cid_tid_fid_results[c_id][t_id] = dict()
camera_trackid_results = camera_results[c_id][tid_mask]
fids = np.unique(camera_trackid_results[:, 2])
fids = fids[fids % interval == 0]
fids = list(map(int, fids))
cid_tid_to_fids[c_id][t_id] = fids
for f_id in fids:
st_frame = f_id
ed_frame = f_id + interval
st_mask = camera_trackid_results[:, 2] >= st_frame
ed_mask = camera_trackid_results[:, 2] < ed_frame
frame_mask = np.logical_and(st_mask, ed_mask)
cid_tid_fid_results[c_id][t_id][f_id] = camera_trackid_results[
frame_mask]
return camera_results, cid_tid_fid_results
def save_mtmct_vis_results(camera_results, captures, output_dir,
multi_res=None):
# camera_results: 'cid, tid, fid, x1, y1, w, h'
camera_ids = list(camera_results.keys())
import shutil
save_dir = os.path.join(output_dir, 'mtmct_vis')
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
for idx, video_file in enumerate(captures):
capture = cv2.VideoCapture(video_file)
cid = camera_ids[idx]
basename = os.path.basename(video_file)
video_out_name = "vis_" + basename
out_path = os.path.join(save_dir, video_out_name)
print("Start visualizing output video: {}".format(out_path))
# Get Video info : resolution, fps, frame count
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(capture.get(cv2.CAP_PROP_FPS))
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
frame_id = 0
while (1):
if frame_id % 50 == 0:
print('frame id: ', frame_id)
ret, frame = capture.read()
frame_id += 1
if not ret:
if frame_id == 1:
print("video read failed!")
break
frame_results = camera_results[cid][camera_results[cid][:, 2] ==
frame_id]
boxes = frame_results[:, -4:]
ids = frame_results[:, 1]
image = plot_tracking(frame, boxes, ids, frame_id=frame_id, fps=fps)
# add attr vis
if multi_res:
tid_list = multi_res.keys() # c0_t1, c0_t2...
all_attr_result = [multi_res[i]["attrs"]
for i in tid_list] # all cid_tid result
if any(
all_attr_result
): # at least one cid_tid[attrs] is not None will goes to attrs_vis
attr_res = []
cid_str = 'c' + str(cid - 1) + "_"
for k in tid_list:
if not k.startswith(cid_str):
continue
if (frame_id - 1) >= len(multi_res[k]['attrs']):
t_attr = None
else:
t_attr = multi_res[k]['attrs'][frame_id - 1]
attr_res.append(t_attr)
assert len(attr_res) == len(boxes)
image = visualize_attr(
image, attr_res, boxes, is_mtmct=True)
writer.write(image)
writer.release()
def get_euclidean(x, y, **kwargs):
m = x.shape[0]
n = y.shape[0]
distmat = (np.power(x, 2).sum(axis=1, keepdims=True).repeat(
n, axis=1) + np.power(y, 2).sum(axis=1, keepdims=True).repeat(
m, axis=1).T)
distmat -= np.dot(2 * x, y.T)
return distmat
def cosine_similarity(x, y, eps=1e-12):
"""
Computes cosine similarity between two tensors.
Value == 1 means the same vector
Value == 0 means perpendicular vectors
"""
x_n, y_n = np.linalg.norm(
x, axis=1, keepdims=True), np.linalg.norm(
y, axis=1, keepdims=True)
x_norm = x / np.maximum(x_n, eps * np.ones_like(x_n))
y_norm = y / np.maximum(y_n, eps * np.ones_like(y_n))
sim_mt = np.dot(x_norm, y_norm.T)
return sim_mt
def get_cosine(x, y, eps=1e-12):
"""
Computes cosine distance between two tensors.
The cosine distance is the inverse cosine similarity
-> cosine_distance = abs(-cosine_distance) to make it
similar in behavior to euclidean distance
"""
sim_mt = cosine_similarity(x, y, eps)
return sim_mt
def get_dist_mat(x, y, func_name="euclidean"):
if func_name == "cosine":
dist_mat = get_cosine(x, y)
elif func_name == "euclidean":
dist_mat = get_euclidean(x, y)
print("Using {} as distance function during evaluation".format(func_name))
return dist_mat
def intracam_ignore(st_mask, cid_tids):
count = len(cid_tids)
for i in range(count):
for j in range(count):
if cid_tids[i][1] == cid_tids[j][1]:
st_mask[i, j] = 0.
return st_mask
def get_sim_matrix_new(cid_tid_dict, cid_tids):
# Note: camera independent get_sim_matrix function,
# which is different from the one in camera_utils.py.
count = len(cid_tids)
q_arr = np.array(
[cid_tid_dict[cid_tids[i]]['mean_feat'] for i in range(count)])
g_arr = np.array(
[cid_tid_dict[cid_tids[i]]['mean_feat'] for i in range(count)])
# compute distmat
distmat = get_dist_mat(q_arr, g_arr, func_name="cosine")
    # The same-camera deduplication logic below was removed: even under the same camera,
    # the source data never comes from the same process.
# mask the element which belongs to same video
# st_mask = np.ones((count, count), dtype=np.float32)
# st_mask = intracam_ignore(st_mask, cid_tids)
# sim_matrix = distmat * st_mask
np.fill_diagonal(distmat, 0.)
return 1. - distmat
def get_match(cluster_labels):
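    # Group sample indices by cluster label, e.g. labels [0, 1, 0] -> [[0, 2], [1]].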
cluster_dict = dict()
cluster = list()
for i, l in enumerate(cluster_labels):
if l in list(cluster_dict.keys()):
cluster_dict[l].append(i)
else:
cluster_dict[l] = [i]
for idx in cluster_dict:
cluster.append(cluster_dict[idx])
return cluster
def get_cid_tid(cluster_labels, cid_tids):
cluster = list()
for labels in cluster_labels:
cid_tid_list = list()
for label in labels:
cid_tid_list.append(cid_tids[label])
cluster.append(cid_tid_list)
return cluster
def get_labels(cid_tid_dict, cid_tids):
# compute cost matrix between features
cost_matrix = get_sim_matrix_new(cid_tid_dict, cid_tids)
# cluster all the features
cluster1 = AgglomerativeClustering(
n_clusters=None,
distance_threshold=0.8,
metric='precomputed',
linkage='complete')
cluster_labels1 = cluster1.fit_predict(cost_matrix)
labels = get_match(cluster_labels1)
    # FIXME: remove this unused call (purpose unknown)
# sub_cluster = get_cid_tid(labels, cid_tids)
return labels
def sub_cluster(cid_tid_dict):
'''
cid_tid_dict: all camera_id and track_id
'''
# get all keys
cid_tids = sorted([key for key in cid_tid_dict.keys()])
# cluster all trackid
clu = get_labels(cid_tid_dict, cid_tids)
# relabel every cluster groups
new_clu = list()
for c_list in clu:
new_clu.append([cid_tids[c] for c in c_list])
cid_tid_label = dict()
for i, c_list in enumerate(new_clu):
for c in c_list:
cid_tid_label[c] = i + 1
return cid_tid_label
def distill_idfeat(mot_res):
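    # Distill one representative ReID feature for a track: drop small boxes,
    # subsample frames, rank the rest by image quality and average the best ones.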
qualities_list = mot_res["qualities"]
    # Fetch the feature vectors this way to stay compatible with the changed result format.
feature_list = list(map(lambda it: it['vector'], mot_res["features"]))
rects = mot_res["rects"]
qualities_new = []
feature_new = []
# filter rect less than 100*20
for idx, rect in enumerate(rects):
conf, xmin, ymin, xmax, ymax = rect[0]
        if (xmax - xmin) * (ymax - ymin) > 2000:
qualities_new.append(qualities_list[idx])
feature_new.append(feature_list[idx])
# take all features if available rect is less than 2
if len(qualities_new) < 2:
qualities_new = qualities_list
feature_new = feature_list
    # if more than 20 frames are available, keep one frame out of every two
skipf = 1
if len(qualities_new) > 20:
skipf = 2
quality_skip = np.array(qualities_new[::skipf])
feature_skip = np.array(feature_new[::skipf])
# sort features with image qualities, take the most trustworth features
topk_argq = np.argsort(quality_skip)[::-1]
if not topk_argq.any():
return topk_argq, 0
best_frame = topk_argq[0]
if (quality_skip > 0.6).sum() > 1:
topk_feat = feature_skip[topk_argq[quality_skip > 0.6]]
else:
topk_feat = feature_skip[topk_argq]
# get final features by mean or cluster, at most take five
mean_feat = np.mean(topk_feat[:5], axis=0)
return mean_feat, best_frame
def res2dict(multi_res):
cid_tid_dict = {}
for cid, c_res in enumerate(multi_res):
for tid, res in c_res["result"].items():
key = "c" + str(cid) + "_t" + str(tid)
if key not in cid_tid_dict:
if len(res["features"]) == 0:
continue
cid_tid_dict[key] = res
feat, _ = distill_idfeat(res)
cid_tid_dict[key]['mean_feat'] = feat
return cid_tid_dict
def mtmct_process(multi_res, captures, mtmct_vis=True, output_dir="output"):
cid_tid_dict = res2dict(multi_res)
if len(cid_tid_dict) == 0:
print("no tracking result found, mtmct will be skiped.")
return
map_tid = sub_cluster(cid_tid_dict)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
pred_mtmct_file = os.path.join(output_dir, 'mtmct_result.txt')
gen_restxt(pred_mtmct_file, map_tid, cid_tid_dict)
if mtmct_vis:
camera_results, cid_tid_fid_res = get_mtmct_matching_results(
pred_mtmct_file)
save_mtmct_vis_results(
camera_results,
captures,
output_dir=output_dir,
multi_res=cid_tid_dict)
| PaddleDetection/deploy/pipeline/pphuman/mtmct.py/0 | {
"file_path": "PaddleDetection/deploy/pipeline/pphuman/mtmct.py",
"repo_id": "PaddleDetection",
"token_count": 6618
} | 51 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import math
class VehiclePressingRecognizer(object):
def __init__(self, cfg):
self.cfg = cfg
def judge(self, Ax1, Ay1, Ax2, Ay2, Bx1, By1, Bx2, By2):
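        # Segment-intersection test: a bounding-box overlap check followed by
        # two cross-product "straddle" tests between segments A(x1,y1)-(x2,y2) and B.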
if (max(Ax1,Ax2)>=min(Bx1,Bx2) and min(Ax1,Ax2)<=max(Bx1,Bx2)) and \
(max(Ay1,Ay2)>=min(By1,By2) and min(Ay1,Ay2)<=max(By1,By2)):
if ((Bx1-Ax1)*(Ay2-Ay1)-(By1-Ay1)*(Ax2-Ax1)) * ((Bx2-Ax1)*(Ay2-Ay1)-(By2-Ay1)*(Ax2-Ax1))<=0 \
and ((Ax1-Bx1)*(By2-By1)-(Ay1-By1)*(Bx2-Bx1)) * ((Ax2-Bx1)*(By2-By1)-(Ay2-By1)*(Bx2-Bx1)) <=0:
return True
else:
return False
else:
return False
def is_intersect(self, line, bbox):
Ax1, Ay1, Ax2, Ay2 = line
xmin, ymin, xmax, ymax = bbox
bottom = self.judge(Ax1, Ay1, Ax2, Ay2, xmin, ymax, xmax, ymax)
return bottom
def run(self, lanes, det_res):
intersect_bbox_list = []
start_idx, boxes_num_i = 0, 0
for i in range(len(lanes)):
lane = lanes[i]
if det_res is not None:
det_res_i = {}
boxes_num_i = det_res['boxes_num'][i]
det_res_i['boxes'] = det_res['boxes'][start_idx:start_idx +
boxes_num_i, :]
intersect_bbox = []
for line in lane:
for bbox in det_res_i['boxes']:
if self.is_intersect(line, bbox[2:]):
intersect_bbox.append(bbox)
intersect_bbox_list.append(intersect_bbox)
start_idx += boxes_num_i
return intersect_bbox_list
def mot_run(self, lanes, det_res):
intersect_bbox_list = []
if det_res is None:
return intersect_bbox_list
lanes_res = lanes['output']
for i in range(len(lanes_res)):
lane = lanes_res[i]
for line in lane:
for bbox in det_res:
if self.is_intersect(line, bbox[3:]):
intersect_bbox_list.append(bbox)
return intersect_bbox_list | PaddleDetection/deploy/pipeline/ppvehicle/vehicle_pressing.py/0 | {
"file_path": "PaddleDetection/deploy/pipeline/ppvehicle/vehicle_pressing.py",
"repo_id": "PaddleDetection",
"token_count": 1439
} | 52 |
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sstream>
// for setprecision
#include <chrono>
#include <iomanip>
#include <iostream>
#include "include/postprocess.h"
namespace PaddleDetection {
cv::Scalar GetColor(int idx) {
idx = idx * 3;
cv::Scalar color =
cv::Scalar((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255);
return color;
}
cv::Mat VisualizeTrackResult(const cv::Mat& img,
const MOTResult& results,
const float fps,
const int frame_id) {
cv::Mat vis_img = img.clone();
int im_h = img.rows;
int im_w = img.cols;
float text_scale = std::max(1, static_cast<int>(im_w / 1600.));
float text_thickness = 2.;
float line_thickness = std::max(1, static_cast<int>(im_w / 500.));
std::ostringstream oss;
oss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
oss << "frame: " << frame_id << " ";
oss << "fps: " << fps << " ";
oss << "num: " << results.size();
std::string text = oss.str();
cv::Point origin;
origin.x = 0;
origin.y = static_cast<int>(15 * text_scale);
cv::putText(vis_img,
text,
origin,
cv::FONT_HERSHEY_PLAIN,
text_scale,
              cv::Scalar(0, 0, 255),
2);
for (int i = 0; i < results.size(); ++i) {
const int obj_id = results[i].ids;
const float score = results[i].score;
cv::Scalar color = GetColor(obj_id);
cv::Point pt1 = cv::Point(results[i].rects.left, results[i].rects.top);
cv::Point pt2 = cv::Point(results[i].rects.right, results[i].rects.bottom);
cv::Point id_pt =
cv::Point(results[i].rects.left, results[i].rects.top + 10);
cv::Point score_pt =
cv::Point(results[i].rects.left, results[i].rects.top - 10);
cv::rectangle(vis_img, pt1, pt2, color, line_thickness);
std::ostringstream idoss;
idoss << std::setiosflags(std::ios::fixed) << std::setprecision(4);
idoss << obj_id;
std::string id_text = idoss.str();
cv::putText(vis_img,
id_text,
id_pt,
cv::FONT_HERSHEY_PLAIN,
text_scale,
cv::Scalar(0, 255, 255),
text_thickness);
std::ostringstream soss;
soss << std::setiosflags(std::ios::fixed) << std::setprecision(2);
soss << score;
std::string score_text = soss.str();
cv::putText(vis_img,
score_text,
score_pt,
cv::FONT_HERSHEY_PLAIN,
text_scale,
cv::Scalar(0, 255, 255),
text_thickness);
}
return vis_img;
}
void FlowStatistic(const MOTResult& results,
const int frame_id,
const int secs_interval,
const bool do_entrance_counting,
const int video_fps,
const Rect entrance,
std::set<int>* id_set,
std::set<int>* interval_id_set,
std::vector<int>* in_id_list,
std::vector<int>* out_id_list,
std::map<int, std::vector<float>>* prev_center,
std::vector<std::string>* records) {
if (frame_id == 0) interval_id_set->clear();
if (do_entrance_counting) {
// Count in and out number:
// Use horizontal center line as the entrance just for simplification.
    // If a person was above the horizontal center line at the previous frame
    // and is below the line at the current frame, the in number is increased
    // by one.
    // If a person was below the horizontal center line at the previous frame
    // and is above the line at the current frame, the out number is increased
    // by one.
// TODO(qianhui): if the entrance is not the horizontal center line,
// the counting method should be optimized.
float entrance_y = entrance.top;
for (const auto& result : results) {
float center_x = (result.rects.left + result.rects.right) / 2;
float center_y = (result.rects.top + result.rects.bottom) / 2;
int ids = result.ids;
std::map<int, std::vector<float>>::iterator iter;
iter = prev_center->find(ids);
if (iter != prev_center->end()) {
if (iter->second[1] <= entrance_y && center_y > entrance_y) {
in_id_list->push_back(ids);
}
if (iter->second[1] >= entrance_y && center_y < entrance_y) {
out_id_list->push_back(ids);
}
(*prev_center)[ids][0] = center_x;
(*prev_center)[ids][1] = center_y;
} else {
prev_center->insert(
std::pair<int, std::vector<float>>(ids, {center_x, center_y}));
}
}
}
  // Count total number, number at a manual-setting interval
for (const auto& result : results) {
id_set->insert(result.ids);
interval_id_set->insert(result.ids);
}
std::ostringstream os;
os << "Frame id: " << frame_id << ", Total count: " << id_set->size();
if (do_entrance_counting) {
os << ", In count: " << in_id_list->size()
<< ", Out count: " << out_id_list->size();
}
// Reset counting at the interval beginning
int curr_interval_count = -1;
if (frame_id % video_fps == 0 && frame_id / video_fps % secs_interval == 0) {
curr_interval_count = interval_id_set->size();
os << ", Count during " << secs_interval
<< " secs: " << curr_interval_count;
interval_id_set->clear();
}
os << "\n";
std::string record = os.str();
records->push_back(record);
LOG(INFO) << record;
}
void SaveMOTResult(const MOTResult& results,
const int frame_id,
std::vector<std::string>* records) {
// result format: frame_id, track_id, x1, y1, w, h
std::string record;
for (int i = 0; i < results.size(); ++i) {
MOTTrack mot_track = results[i];
int ids = mot_track.ids;
float score = mot_track.score;
Rect rects = mot_track.rects;
float x1 = rects.left;
float y1 = rects.top;
float x2 = rects.right;
float y2 = rects.bottom;
float w = x2 - x1;
float h = y2 - y1;
if (w == 0 || h == 0) {
continue;
}
std::ostringstream os;
os << frame_id << " " << ids << " " << x1 << " " << y1 << " " << w << " "
<< h << "\n";
record = os.str();
records->push_back(record);
}
}
} // namespace PaddleDetection
| PaddleDetection/deploy/pptracking/cpp/src/postprocess.cc/0 | {
"file_path": "PaddleDetection/deploy/pptracking/cpp/src/postprocess.cc",
"repo_id": "PaddleDetection",
"token_count": 3112
} | 53 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import base_jde_tracker
from . import base_sde_tracker
from .base_jde_tracker import *
from .base_sde_tracker import *
from . import jde_tracker
from . import deepsort_tracker
from . import ocsort_tracker
from . import center_tracker
from .jde_tracker import *
from .deepsort_tracker import *
from .ocsort_tracker import *
from .botsort_tracker import *
from .center_tracker import *
| PaddleDetection/deploy/pptracking/python/mot/tracker/__init__.py/0 | {
"file_path": "PaddleDetection/deploy/pptracking/python/mot/tracker/__init__.py",
"repo_id": "PaddleDetection",
"token_count": 294
} | 54 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import cv2
import time
import numpy as np
import collections
import math
__all__ = [
'MOTTimer', 'Detection', 'write_mot_results', 'load_det_results',
'preprocess_reid', 'get_crops', 'clip_box', 'scale_coords',
'flow_statistic', 'update_object_info'
]
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.DEBUG)
class MOTTimer(object):
"""
This class used to compute and print the current FPS while evaling.
"""
def __init__(self, window_size=20):
self.start_time = 0.
self.diff = 0.
self.duration = 0.
self.deque = collections.deque(maxlen=window_size)
def tic(self):
# using time.time instead of time.clock because time time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.deque.append(self.diff)
if average:
self.duration = np.mean(self.deque)
else:
self.duration = np.sum(self.deque)
return self.duration
def clear(self):
self.start_time = 0.
self.diff = 0.
self.duration = 0.
class Detection(object):
"""
This class represents a bounding box detection in a single image.
Args:
tlwh (Tensor): Bounding box in format `(top left x, top left y,
width, height)`.
score (Tensor): Bounding box confidence score.
feature (Tensor): A feature vector that describes the object
contained in this image.
cls_id (Tensor): Bounding box category id.
"""
def __init__(self, tlwh, score, feature, cls_id):
self.tlwh = np.asarray(tlwh, dtype=np.float32)
self.score = float(score)
self.feature = np.asarray(feature, dtype=np.float32)
self.cls_id = int(cls_id)
def to_tlbr(self):
"""
Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
def to_xyah(self):
"""
Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
def write_mot_results(filename, results, data_type='mot', num_classes=1):
# support single and multi classes
if data_type in ['mot', 'mcmot']:
save_format = '{frame},{id},{x1},{y1},{w},{h},{score},{cls_id},-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} car 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
f = open(filename, 'w')
for cls_id in range(num_classes):
for frame_id, tlwhs, tscores, track_ids in results[cls_id]:
if data_type == 'kitti':
frame_id -= 1
for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):
if track_id < 0: continue
if data_type == 'mot':
cls_id = -1
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(
frame=frame_id,
id=track_id,
x1=x1,
y1=y1,
x2=x2,
y2=y2,
w=w,
h=h,
score=score,
cls_id=cls_id)
f.write(line)
print('MOT results save in {}'.format(filename))
def load_det_results(det_file, num_frames):
assert os.path.exists(det_file) and os.path.isfile(det_file), \
'{} is not exist or not a file.'.format(det_file)
labels = np.loadtxt(det_file, dtype='float32', delimiter=',')
assert labels.shape[1] == 7, \
"Each line of {} should have 7 items: '[frame_id],[x0],[y0],[w],[h],[score],[class_id]'.".format(det_file)
results_list = []
for frame_i in range(num_frames):
results = {'bbox': [], 'score': [], 'cls_id': []}
lables_with_frame = labels[labels[:, 0] == frame_i + 1]
# each line of lables_with_frame:
# [frame_id],[x0],[y0],[w],[h],[score],[class_id]
for l in lables_with_frame:
results['bbox'].append(l[1:5])
results['score'].append(l[5:6])
results['cls_id'].append(l[6:7])
results_list.append(results)
return results_list
def scale_coords(coords, input_shape, im_shape, scale_factor):
# Note: ratio has only one value, scale_factor[0] == scale_factor[1]
#
# This function only used for JDE YOLOv3 or other detectors with
# LetterBoxResize and JDEBBoxPostProcess, coords output from detector had
# not scaled back to the origin image.
ratio = scale_factor[0]
pad_w = (input_shape[1] - int(im_shape[1])) / 2
pad_h = (input_shape[0] - int(im_shape[0])) / 2
coords[:, 0::2] -= pad_w
coords[:, 1::2] -= pad_h
coords[:, 0:4] /= ratio
coords[:, :4] = np.clip(coords[:, :4], a_min=0, a_max=coords[:, :4].max())
return coords.round()
def clip_box(xyxy, ori_image_shape):
H, W = ori_image_shape
xyxy[:, 0::2] = np.clip(xyxy[:, 0::2], a_min=0, a_max=W)
xyxy[:, 1::2] = np.clip(xyxy[:, 1::2], a_min=0, a_max=H)
w = xyxy[:, 2:3] - xyxy[:, 0:1]
h = xyxy[:, 3:4] - xyxy[:, 1:2]
mask = np.logical_and(h > 0, w > 0)
keep_idx = np.nonzero(mask)
return xyxy[keep_idx[0]], keep_idx
def get_crops(xyxy, ori_img, w, h):
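    # Crop every detection box from the original image and preprocess the crops
    # into a normalized batch for the ReID feature extractor.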
crops = []
xyxy = xyxy.astype(np.int64)
ori_img = ori_img.transpose(1, 0, 2) # [h,w,3]->[w,h,3]
for i, bbox in enumerate(xyxy):
crop = ori_img[bbox[0]:bbox[2], bbox[1]:bbox[3], :]
crops.append(crop)
crops = preprocess_reid(crops, w, h)
return crops
def preprocess_reid(imgs,
w=64,
h=192,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]):
im_batch = []
for img in imgs:
img = cv2.resize(img, (w, h))
img = img[:, :, ::-1].astype('float32').transpose((2, 0, 1)) / 255
img_mean = np.array(mean).reshape((3, 1, 1))
img_std = np.array(std).reshape((3, 1, 1))
img -= img_mean
img /= img_std
img = np.expand_dims(img, axis=0)
im_batch.append(img)
im_batch = np.concatenate(im_batch, 0)
return im_batch
def flow_statistic(result,
secs_interval,
do_entrance_counting,
do_break_in_counting,
region_type,
video_fps,
entrance,
id_set,
interval_id_set,
in_id_list,
out_id_list,
prev_center,
records,
in_out_records,
data_type='mot',
ids2names=['pedestrian']):
# Count in/out number:
# Note that 'region_type' should be one of ['horizontal', 'vertical', 'custom'],
# 'horizontal' and 'vertical' means entrance is the center line as the entrance when do_entrance_counting,
# 'custom' means entrance is a region defined by users when do_break_in_counting.
if do_entrance_counting:
assert region_type in [
'horizontal', 'vertical'
], "region_type should be 'horizontal' or 'vertical' when do entrance counting."
entrance_x, entrance_y = entrance[0], entrance[1]
frame_id, tlwhs, tscores, track_ids = result
for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):
if track_id < 0: continue
if data_type == 'kitti':
frame_id -= 1
x1, y1, w, h = tlwh
center_x = x1 + w / 2.
center_y = y1 + h / 2.
if track_id in prev_center:
if region_type == 'horizontal':
# horizontal center line
if prev_center[track_id][1] <= entrance_y and \
center_y > entrance_y:
in_id_list.append(track_id)
                        # FIXME: use a shorthand helper function
add_in_out_record(frame_id, in_out_records, track_id, "IN")
if prev_center[track_id][1] >= entrance_y and \
center_y < entrance_y:
out_id_list.append(track_id)
add_in_out_record(frame_id, in_out_records, track_id, "OUT")
else:
# vertical center line
if prev_center[track_id][0] <= entrance_x and \
center_x > entrance_x:
in_id_list.append(track_id, "")
add_in_out_record(frame_id, in_out_records, track_id, "IN")
if prev_center[track_id][0] >= entrance_x and \
center_x < entrance_x:
out_id_list.append(track_id)
add_in_out_record(frame_id, in_out_records, track_id, "OUT")
prev_center[track_id][0] = center_x
prev_center[track_id][1] = center_y
else:
prev_center[track_id] = [center_x, center_y]
if do_break_in_counting:
assert region_type in [
'custom'
], "region_type should be 'custom' when do break_in counting."
assert len(
entrance
) >= 4, "entrance should be at least 3 points and (w,h) of image when do break_in counting."
im_w, im_h = entrance[-1][:]
entrance = np.array(entrance[:-1])
frame_id, tlwhs, tscores, track_ids = result
for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):
if track_id < 0: continue
if data_type == 'kitti':
frame_id -= 1
x1, y1, w, h = tlwh
center_x = min(x1 + w / 2., im_w - 1)
if ids2names[0] == 'pedestrian':
center_y = min(y1 + h, im_h - 1)
else:
center_y = min(y1 + h / 2, im_h - 1)
# counting objects in region of the first frame
if frame_id == 1:
if in_quadrangle([center_x, center_y], entrance, im_h, im_w):
in_id_list.append(-1)
else:
prev_center[track_id] = [center_x, center_y]
else:
if track_id in prev_center:
if not in_quadrangle(prev_center[track_id], entrance, im_h,
im_w) and in_quadrangle(
[center_x, center_y], entrance,
im_h, im_w):
in_id_list.append(track_id)
prev_center[track_id] = [center_x, center_y]
else:
prev_center[track_id] = [center_x, center_y]
    # Count total number, number at a manual-setting interval
frame_id, tlwhs, tscores, track_ids = result
for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):
if track_id < 0: continue
id_set.add(track_id)
interval_id_set.add(track_id)
# Reset counting at the interval beginning
if frame_id % video_fps == 0 and frame_id / video_fps % secs_interval == 0:
curr_interval_count = len(interval_id_set)
interval_id_set.clear()
info = "Frame id: {}, Total count: {}".format(frame_id, len(id_set))
if do_entrance_counting:
info += ", In count: {}, Out count: {}".format(
len(in_id_list), len(out_id_list))
if do_break_in_counting:
info += ", Break_in count: {}".format(len(in_id_list))
if frame_id % video_fps == 0 and frame_id / video_fps % secs_interval == 0:
info += ", Count during {} secs: {}".format(secs_interval,
curr_interval_count)
interval_id_set.clear()
# print(info)
info += "\n"
records.append(info)
return {
"id_set": id_set,
"interval_id_set": interval_id_set,
"in_id_list": in_id_list,
"out_id_list": out_id_list,
"prev_center": prev_center,
"records": records,
}
def add_in_out_record(frame_id, in_out_records, track_id, action):
for record in in_out_records:
        # Record only one in/out event per track; thread safety is not a concern here.
if record["trackId"] == track_id:
return
logger.debug(f"{track_id} {action}")
in_out_records.append({
"trackId": track_id,
"frame": frame_id,
"action": action
})
def distance(center_1, center_2):
return math.sqrt(
math.pow(center_1[0] - center_2[0], 2) + math.pow(center_1[1] -
center_2[1], 2))
# update vehicle parking info
def update_object_info(object_in_region_info,
result,
region_type,
entrance,
fps,
illegal_parking_time,
distance_threshold_frame=3,
distance_threshold_interval=50):
'''
For consecutive frames, the distance between two frame is smaller than distance_threshold_frame, regard as parking
For parking in general, the move distance should smaller than distance_threshold_interval
The moving distance of the vehicle is scaled according to the y, which is inversely proportional to y.
'''
assert region_type in [
'custom'
], "region_type should be 'custom' when do break_in counting."
assert len(
entrance
) >= 4, "entrance should be at least 3 points and (w,h) of image when do break_in counting."
frame_id, tlwhs, tscores, track_ids = result # result from mot
im_w, im_h = entrance[-1][:]
entrance = np.array(entrance[:-1])
illegal_parking_dict = {}
for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):
if track_id < 0: continue
x1, y1, w, h = tlwh
center_x = min(x1 + w / 2., im_w - 1)
center_y = min(y1 + h / 2, im_h - 1)
if not in_quadrangle([center_x, center_y], entrance, im_h, im_w):
continue
current_center = (center_x, center_y)
if track_id not in object_in_region_info.keys(
): # first time appear in region
object_in_region_info[track_id] = {}
object_in_region_info[track_id]["start_frame"] = frame_id
object_in_region_info[track_id]["end_frame"] = frame_id
object_in_region_info[track_id]["prev_center"] = current_center
object_in_region_info[track_id]["start_center"] = current_center
else:
prev_center = object_in_region_info[track_id]["prev_center"]
dis = distance(current_center, prev_center)
scaled_dis = 200 * dis / (
current_center[1] + 1) # scale distance according to y
dis = scaled_dis
if dis < distance_threshold_frame: # not move
object_in_region_info[track_id]["end_frame"] = frame_id
object_in_region_info[track_id]["prev_center"] = current_center
else: # move
object_in_region_info[track_id]["start_frame"] = frame_id
object_in_region_info[track_id]["end_frame"] = frame_id
object_in_region_info[track_id]["prev_center"] = current_center
object_in_region_info[track_id]["start_center"] = current_center
# whether current object parking
distance_from_start = distance(
object_in_region_info[track_id]["start_center"], current_center)
if distance_from_start > distance_threshold_interval:
# moved
object_in_region_info[track_id]["start_frame"] = frame_id
object_in_region_info[track_id]["end_frame"] = frame_id
object_in_region_info[track_id]["prev_center"] = current_center
object_in_region_info[track_id]["start_center"] = current_center
continue
if (object_in_region_info[track_id]["end_frame"] - object_in_region_info[track_id][
"start_frame"]) / fps >= illegal_parking_time \
and distance_from_start < distance_threshold_interval:
illegal_parking_dict[track_id] = {"bbox": [x1, y1, w, h]}
return object_in_region_info, illegal_parking_dict
def in_quadrangle(point, entrance, im_h, im_w):
mask = np.zeros((im_h, im_w, 1), np.uint8)
cv2.fillPoly(mask, [entrance], 255)
p = tuple(map(int, point))
if mask[p[1], p[0], :] > 0:
return True
else:
return False
| PaddleDetection/deploy/pptracking/python/mot/utils.py/0 | {
"file_path": "PaddleDetection/deploy/pptracking/python/mot/utils.py",
"repo_id": "PaddleDetection",
"token_count": 8867
} | 55 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
this code is based on https://github.com/open-mmlab/mmpose/mmpose/core/post_processing/post_transforms.py
"""
import cv2
import numpy as np
class EvalAffine(object):
def __init__(self, size, stride=64):
super(EvalAffine, self).__init__()
self.size = size
self.stride = stride
def __call__(self, image, im_info):
s = self.size
h, w, _ = image.shape
trans, size_resized = get_affine_mat_kernel(h, w, s, inv=False)
image_resized = cv2.warpAffine(image, trans, size_resized)
return image_resized, im_info
def get_affine_mat_kernel(h, w, s, inv=False):
if w < h:
w_ = s
h_ = int(np.ceil((s / w * h) / 64.) * 64)
scale_w = w
scale_h = h_ / w_ * w
else:
h_ = s
w_ = int(np.ceil((s / h * w) / 64.) * 64)
scale_h = h
scale_w = w_ / h_ * h
center = np.array([np.round(w / 2.), np.round(h / 2.)])
size_resized = (w_, h_)
trans = get_affine_transform(
center, np.array([scale_w, scale_h]), 0, size_resized, inv=inv)
return trans, size_resized
def get_affine_transform(center,
input_size,
rot,
output_size,
shift=(0., 0.),
inv=False):
"""Get the affine transform matrix, given the center/scale/rot/output_size.
Args:
center (np.ndarray[2, ]): Center of the bounding box (x, y).
scale (np.ndarray[2, ]): Scale of the bounding box
wrt [width, height].
rot (float): Rotation angle (degree).
output_size (np.ndarray[2, ]): Size of the destination heatmaps.
shift (0-100%): Shift translation ratio wrt the width/height.
Default (0., 0.).
inv (bool): Option to inverse the affine transform direction.
(inv=False: src->dst or inv=True: dst->src)
Returns:
np.ndarray: The transform matrix.
"""
assert len(center) == 2
assert len(output_size) == 2
assert len(shift) == 2
if not isinstance(input_size, (np.ndarray, list)):
input_size = np.array([input_size, input_size], dtype=np.float32)
scale_tmp = input_size
shift = np.array(shift)
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = rotate_point([0., src_w * -0.5], rot_rad)
dst_dir = np.array([0., dst_w * -0.5])
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
src[2, :] = _get_3rd_point(src[0, :], src[1, :])
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def get_warp_matrix(theta, size_input, size_dst, size_target):
"""This code is based on
https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py
Calculate the transformation matrix under the constraint of unbiased.
Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
Data Processing for Human Pose Estimation (CVPR 2020).
Args:
theta (float): Rotation angle in degrees.
size_input (np.ndarray): Size of input image [w, h].
size_dst (np.ndarray): Size of output image [w, h].
size_target (np.ndarray): Size of ROI in input plane [w, h].
Returns:
matrix (np.ndarray): A matrix for transformation.
"""
theta = np.deg2rad(theta)
matrix = np.zeros((2, 3), dtype=np.float32)
scale_x = size_dst[0] / size_target[0]
scale_y = size_dst[1] / size_target[1]
matrix[0, 0] = np.cos(theta) * scale_x
matrix[0, 1] = -np.sin(theta) * scale_x
matrix[0, 2] = scale_x * (
-0.5 * size_input[0] * np.cos(theta) + 0.5 * size_input[1] *
np.sin(theta) + 0.5 * size_target[0])
matrix[1, 0] = np.sin(theta) * scale_y
matrix[1, 1] = np.cos(theta) * scale_y
matrix[1, 2] = scale_y * (
-0.5 * size_input[0] * np.sin(theta) - 0.5 * size_input[1] *
np.cos(theta) + 0.5 * size_target[1])
return matrix
def rotate_point(pt, angle_rad):
"""Rotate a point by an angle.
Args:
pt (list[float]): 2 dimensional point to be rotated
angle_rad (float): rotation angle by radian
Returns:
list[float]: Rotated point.
"""
assert len(pt) == 2
sn, cs = np.sin(angle_rad), np.cos(angle_rad)
new_x = pt[0] * cs - pt[1] * sn
new_y = pt[0] * sn + pt[1] * cs
rotated_pt = [new_x, new_y]
return rotated_pt
def _get_3rd_point(a, b):
"""To calculate the affine matrix, three pairs of points are required. This
function is used to get the 3rd point, given 2D points a & b.
The 3rd point is defined by rotating vector `a - b` by 90 degrees
anticlockwise, using b as the rotation center.
Args:
a (np.ndarray): point(x,y)
b (np.ndarray): point(x,y)
Returns:
np.ndarray: The 3rd point.
"""
assert len(a) == 2
assert len(b) == 2
direction = a - b
third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)
return third_pt
class TopDownEvalAffine(object):
"""apply affine transform to image and coords
Args:
trainsize (list): [w, h], the standard size used to train
use_udp (bool): whether to use Unbiased Data Processing.
records(dict): the dict contained the image and coords
Returns:
records (dict): contain the image and coords after tranformed
"""
def __init__(self, trainsize, use_udp=False):
self.trainsize = trainsize
self.use_udp = use_udp
def __call__(self, image, im_info):
rot = 0
imshape = im_info['im_shape'][::-1]
center = im_info['center'] if 'center' in im_info else imshape / 2.
scale = im_info['scale'] if 'scale' in im_info else imshape
if self.use_udp:
trans = get_warp_matrix(
rot, center * 2.0,
[self.trainsize[0] - 1.0, self.trainsize[1] - 1.0], scale)
image = cv2.warpAffine(
image,
trans, (int(self.trainsize[0]), int(self.trainsize[1])),
flags=cv2.INTER_LINEAR)
else:
trans = get_affine_transform(center, scale, rot, self.trainsize)
image = cv2.warpAffine(
image,
trans, (int(self.trainsize[0]), int(self.trainsize[1])),
flags=cv2.INTER_LINEAR)
return image, im_info
def expand_crop(images, rect, expand_ratio=0.3):
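    # Expand a person (class 0) detection box by `expand_ratio`, keep a roughly 3:4
    # width/height ratio, clip it to the image, and return the crop, the expanded box
    # and the original box; non-person boxes return (None, None, None).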
imgh, imgw, c = images.shape
label, conf, xmin, ymin, xmax, ymax = [int(x) for x in rect.tolist()]
if label != 0:
return None, None, None
org_rect = [xmin, ymin, xmax, ymax]
h_half = (ymax - ymin) * (1 + expand_ratio) / 2.
w_half = (xmax - xmin) * (1 + expand_ratio) / 2.
if h_half > w_half * 4 / 3:
w_half = h_half * 0.75
center = [(ymin + ymax) / 2., (xmin + xmax) / 2.]
ymin = max(0, int(center[0] - h_half))
ymax = min(imgh - 1, int(center[0] + h_half))
xmin = max(0, int(center[1] - w_half))
xmax = min(imgw - 1, int(center[1] + w_half))
return images[ymin:ymax, xmin:xmax, :], [xmin, ymin, xmax, ymax], org_rect
| PaddleDetection/deploy/python/keypoint_preprocess.py/0 | {
"file_path": "PaddleDetection/deploy/python/keypoint_preprocess.py",
"repo_id": "PaddleDetection",
"token_count": 3745
} | 56 |
import cv2
import math
import numpy as np
from preprocess_ops import get_affine_transform
class HRNetPostProcess(object):
def __init__(self, use_dark=True):
self.use_dark = use_dark
def flip_back(self, output_flipped, matched_parts):
assert output_flipped.ndim == 4,\
'output_flipped should be [batch_size, num_joints, height, width]'
output_flipped = output_flipped[:, :, :, ::-1]
for pair in matched_parts:
tmp = output_flipped[:, pair[0], :, :].copy()
output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :]
output_flipped[:, pair[1], :, :] = tmp
return output_flipped
def get_max_preds(self, heatmaps):
"""get predictions from score maps
Args:
heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
Returns:
preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
maxvals: numpy.ndarray([batch_size, num_joints, 2]), the maximum confidence of the keypoints
"""
assert isinstance(heatmaps,
np.ndarray), 'heatmaps should be numpy.ndarray'
assert heatmaps.ndim == 4, 'batch_images should be 4-ndim'
batch_size = heatmaps.shape[0]
num_joints = heatmaps.shape[1]
width = heatmaps.shape[3]
heatmaps_reshaped = heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = (preds[:, :, 0]) % width
preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return preds, maxvals
def gaussian_blur(self, heatmap, kernel):
border = (kernel - 1) // 2
batch_size = heatmap.shape[0]
num_joints = heatmap.shape[1]
height = heatmap.shape[2]
width = heatmap.shape[3]
for i in range(batch_size):
for j in range(num_joints):
origin_max = np.max(heatmap[i, j])
dr = np.zeros((height + 2 * border, width + 2 * border))
dr[border:-border, border:-border] = heatmap[i, j].copy()
dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)
heatmap[i, j] = dr[border:-border, border:-border].copy()
heatmap[i, j] *= origin_max / np.max(heatmap[i, j])
return heatmap
def dark_parse(self, hm, coord):
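        # DARK refinement: fit a second-order Taylor expansion of the (log) heatmap
        # around the integer peak and shift the coordinate by -Hessian^-1 * gradient.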
heatmap_height = hm.shape[0]
heatmap_width = hm.shape[1]
px = int(coord[0])
py = int(coord[1])
if 1 < px < heatmap_width - 2 and 1 < py < heatmap_height - 2:
dx = 0.5 * (hm[py][px + 1] - hm[py][px - 1])
dy = 0.5 * (hm[py + 1][px] - hm[py - 1][px])
dxx = 0.25 * (hm[py][px + 2] - 2 * hm[py][px] + hm[py][px - 2])
dxy = 0.25 * (hm[py+1][px+1] - hm[py-1][px+1] - hm[py+1][px-1] \
+ hm[py-1][px-1])
dyy = 0.25 * (
hm[py + 2 * 1][px] - 2 * hm[py][px] + hm[py - 2 * 1][px])
derivative = np.matrix([[dx], [dy]])
hessian = np.matrix([[dxx, dxy], [dxy, dyy]])
if dxx * dyy - dxy**2 != 0:
hessianinv = hessian.I
offset = -hessianinv * derivative
offset = np.squeeze(np.array(offset.T), axis=0)
coord += offset
return coord
def dark_postprocess(self, hm, coords, kernelsize):
"""
refer to https://github.com/ilovepose/DarkPose/lib/core/inference.py
"""
hm = self.gaussian_blur(hm, kernelsize)
hm = np.maximum(hm, 1e-10)
hm = np.log(hm)
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
coords[n, p] = self.dark_parse(hm[n][p], coords[n][p])
return coords
def get_final_preds(self, heatmaps, center, scale, kernelsize=3):
"""the highest heatvalue location with a quarter offset in the
direction from the highest response to the second highest response.
Args:
heatmaps (numpy.ndarray): The predicted heatmaps
center (numpy.ndarray): The boxes center
scale (numpy.ndarray): The scale factor
Returns:
preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
maxvals: numpy.ndarray([batch_size, num_joints, 1]), the maximum confidence of the keypoints
"""
coords, maxvals = self.get_max_preds(heatmaps)
heatmap_height = heatmaps.shape[2]
heatmap_width = heatmaps.shape[3]
if self.use_dark:
coords = self.dark_postprocess(heatmaps, coords, kernelsize)
else:
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
hm = heatmaps[n][p]
px = int(math.floor(coords[n][p][0] + 0.5))
py = int(math.floor(coords[n][p][1] + 0.5))
if 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1:
diff = np.array([
hm[py][px + 1] - hm[py][px - 1],
hm[py + 1][px] - hm[py - 1][px]
])
coords[n][p] += np.sign(diff) * .25
preds = coords.copy()
# Transform back
for i in range(coords.shape[0]):
preds[i] = transform_preds(coords[i], center[i], scale[i],
[heatmap_width, heatmap_height])
return preds, maxvals
def __call__(self, output, center, scale):
preds, maxvals = self.get_final_preds(output, center, scale)
return np.concatenate(
(preds, maxvals), axis=-1), np.mean(
maxvals, axis=1)
def transform_preds(coords, center, scale, output_size):
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale * 200, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
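# ---------------------------------------------------------------------------
# Illustrative usage sketch (comment only, not part of the original file).
# Expected inputs for the post-processing class defined above:
#   heatmaps: (batch, num_joints, heat_h, heat_w) network output
#   center:   (batch, 2) detection-box centers in the original image
#   scale:    (batch, 2) detection-box sizes divided by pixel_std=200, the
#             convention used by transform_preds/get_affine_transform
#
#   preds, maxvals = post_process.get_final_preds(heatmaps, center, scale)
#   results, mean_score = post_process(heatmaps, center, scale)
#
# `results` concatenates keypoint coordinates with per-joint confidences.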
| PaddleDetection/deploy/serving/python/postprocess_ops.py/0 | {
"file_path": "PaddleDetection/deploy/serving/python/postprocess_ops.py",
"repo_id": "PaddleDetection",
"token_count": 3367
} | 57 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -e
set -u
set -o pipefail
# Show usage
function show_usage() {
cat <<EOF
Usage: run_demo.sh
-h, --help
Display this help message.
--cmsis_path CMSIS_PATH
Set path to CMSIS.
--ethosu_platform_path ETHOSU_PLATFORM_PATH
Set path to Arm(R) Ethos(TM)-U core platform.
--fvp_path FVP_PATH
Set path to FVP.
--cmake_path
Set path to cmake.
--enable_FVP
    Set to 1 to run the application on local Fixed Virtual Platform (FVP) executables.
EOF
}
# Configure environment variables
FVP_enable=0
export PATH=/opt/arm/gcc-arm-none-eabi/bin:$PATH
# Install python libraries
echo -e "\e[36mInstall python libraries\e[0m"
sudo pip install -r ./requirements.txt
# Parse arguments
while (( $# )); do
case "$1" in
-h|--help)
show_usage
exit 0
;;
--cmsis_path)
if [ $# -gt 1 ]
then
export CMSIS_PATH="$2"
shift 2
else
echo 'ERROR: --cmsis_path requires a non-empty argument' >&2
show_usage >&2
exit 1
fi
;;
--ethosu_platform_path)
if [ $# -gt 1 ]
then
export ETHOSU_PLATFORM_PATH="$2"
shift 2
else
echo 'ERROR: --ethosu_platform_path requires a non-empty argument' >&2
show_usage >&2
exit 1
fi
;;
--fvp_path)
if [ $# -gt 1 ]
then
export PATH="$2/models/Linux64_GCC-6.4:$PATH"
shift 2
else
echo 'ERROR: --fvp_path requires a non-empty argument' >&2
show_usage >&2
exit 1
fi
;;
--cmake_path)
if [ $# -gt 1 ]
then
export CMAKE="$2"
shift 2
else
echo 'ERROR: --cmake_path requires a non-empty argument' >&2
show_usage >&2
exit 1
fi
;;
--enable_FVP)
if [ $# -gt 1 ] && [ "$2" == "1" -o "$2" == "0" ];
then
FVP_enable="$2"
shift 2
else
            echo 'ERROR: --enable_FVP requires an argument of 1 or 0' >&2
show_usage >&2
exit 1
fi
;;
-*|--*)
echo "Error: Unknown flag: $1" >&2
show_usage >&2
exit 1
;;
esac
done
# Choose running environment: cloud(default) or local environment
Platform="VHT_Corstone_SSE-300_Ethos-U55"
if [ $FVP_enable == "1" ]; then
Platform="FVP_Corstone_SSE-300_Ethos-U55"
echo -e "\e[36mRun application on local Fixed Virtual Platforms (FVPs)\e[0m"
else
if [ ! -d "/opt/arm/" ]; then
sudo ./configure_avh.sh
fi
fi
# Directories
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Make build directory
make cleanall
mkdir -p build
cd build
# Get PaddlePaddle inference model
echo -e "\e[36mDownload PaddlePaddle inference model\e[0m"
wget https://bj.bcebos.com/v1/paddledet/deploy/Inference/picodet_s_320_coco_lcnet_no_nms.tar
tar -xf picodet_s_320_coco_lcnet_no_nms.tar
# Compile model for Arm(R) Cortex(R)-M55 CPU and CMSIS-NN
# An alternative to using "python3 -m tvm.driver.tvmc" is to call
# "tvmc" directly once TVM has been pip installed.
python3 -m tvm.driver.tvmc compile --target=cmsis-nn,c \
--target-cmsis-nn-mcpu=cortex-m55 \
--target-c-mcpu=cortex-m55 \
--runtime=crt \
--executor=aot \
--executor-aot-interface-api=c \
--executor-aot-unpacked-api=1 \
--pass-config tir.usmp.enable=1 \
--pass-config tir.usmp.algorithm=hill_climb \
--pass-config tir.disable_storage_rewrite=1 \
--pass-config tir.disable_vectorize=1 picodet_s_320_coco_lcnet_no_nms/model.pdmodel \
--output-format=mlf \
--model-format=paddle \
--module-name=picodet \
--input-shapes image:[1,3,320,320] \
--output=picodet.tar
tar -xf picodet.tar
# Create C header files
cd ..
python3 ./convert_image.py ./image/000000014439_640x640.jpg
# Build demo executable
cd ${script_dir}
echo ${script_dir}
make
# Run demo executable on the AVH
$Platform -C cpu0.CFGDTCMSZ=15 \
-C cpu0.CFGITCMSZ=15 -C mps3_board.uart0.out_file=\"-\" -C mps3_board.uart0.shutdown_tag=\"EXITTHESIM\" \
-C mps3_board.visualisation.disable-visualisation=1 -C mps3_board.telnetterminal0.start_telnet=0 \
-C mps3_board.telnetterminal1.start_telnet=0 -C mps3_board.telnetterminal2.start_telnet=0 -C mps3_board.telnetterminal5.start_telnet=0 \
./build/demo --stat
| PaddleDetection/deploy/third_engine/demo_avh/run_demo.sh/0 | {
"file_path": "PaddleDetection/deploy/third_engine/demo_avh/run_demo.sh",
"repo_id": "PaddleDetection",
"token_count": 2578
} | 58 |
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// reference from https://github.com/RangiLyu/nanodet/tree/main/demo_mnn
#ifndef __PicoDet_H__
#define __PicoDet_H__
#pragma once
#include "Interpreter.hpp"
#include <algorithm>
#include <chrono>
#include <iostream>
#include <memory>
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
#include "ImageProcess.hpp"
#include "MNNDefine.h"
#include "Tensor.hpp"
typedef struct HeadInfo_ {
std::string cls_layer;
std::string dis_layer;
int stride;
} HeadInfo;
typedef struct BoxInfo_ {
float x1;
float y1;
float x2;
float y2;
float score;
int label;
} BoxInfo;
class PicoDet {
public:
PicoDet(const std::string &mnn_path,
int input_width,
int input_length,
int num_thread_ = 4,
float score_threshold_ = 0.5,
float nms_threshold_ = 0.3);
~PicoDet();
int detect(cv::Mat &img, std::vector<BoxInfo> &result_list);
std::string get_label_str(int label);
private:
void decode_infer(MNN::Tensor *cls_pred,
MNN::Tensor *dis_pred,
int stride,
float threshold,
std::vector<std::vector<BoxInfo>> &results);
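  // Decodes one GFL-style distribution prediction (reg_max + 1 bins per box
  // side) for the grid cell at (x, y) with the given stride into an absolute
  // BoxInfo.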
BoxInfo disPred2Bbox(
const float *&dfl_det, int label, float score, int x, int y, int stride);
void nms(std::vector<BoxInfo> &input_boxes, float NMS_THRESH);
private:
std::shared_ptr<MNN::Interpreter> PicoDet_interpreter;
MNN::Session *PicoDet_session = nullptr;
MNN::Tensor *input_tensor = nullptr;
int num_thread;
int image_w;
int image_h;
int in_w = 320;
int in_h = 320;
float score_threshold;
float nms_threshold;
const float mean_vals[3] = {103.53f, 116.28f, 123.675f};
const float norm_vals[3] = {0.017429f, 0.017507f, 0.017125f};
const int num_class = 80;
const int reg_max = 7;
std::vector<HeadInfo> heads_info{
// cls_pred|dis_pred|stride
{"save_infer_model/scale_0.tmp_1", "save_infer_model/scale_4.tmp_1", 8},
{"save_infer_model/scale_1.tmp_1", "save_infer_model/scale_5.tmp_1", 16},
{"save_infer_model/scale_2.tmp_1", "save_infer_model/scale_6.tmp_1", 32},
{"save_infer_model/scale_3.tmp_1", "save_infer_model/scale_7.tmp_1", 64},
};
std::vector<std::string> labels{
"person", "bicycle", "car",
"motorcycle", "airplane", "bus",
"train", "truck", "boat",
"traffic light", "fire hydrant", "stop sign",
"parking meter", "bench", "bird",
"cat", "dog", "horse",
"sheep", "cow", "elephant",
"bear", "zebra", "giraffe",
"backpack", "umbrella", "handbag",
"tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball",
"kite", "baseball bat", "baseball glove",
"skateboard", "surfboard", "tennis racket",
"bottle", "wine glass", "cup",
"fork", "knife", "spoon",
"bowl", "banana", "apple",
"sandwich", "orange", "broccoli",
"carrot", "hot dog", "pizza",
"donut", "cake", "chair",
"couch", "potted plant", "bed",
"dining table", "toilet", "tv",
"laptop", "mouse", "remote",
"keyboard", "cell phone", "microwave",
"oven", "toaster", "sink",
"refrigerator", "book", "clock",
"vase", "scissors", "teddy bear",
"hair drier", "toothbrush"};
};
template <typename _Tp>
int activation_function_softmax(const _Tp *src, _Tp *dst, int length);
inline float fast_exp(float x);
inline float sigmoid(float x);
#endif
| PaddleDetection/deploy/third_engine/demo_mnn_kpts/picodet_mnn.h/0 | {
"file_path": "PaddleDetection/deploy/third_engine/demo_mnn_kpts/picodet_mnn.h",
"repo_id": "PaddleDetection",
"token_count": 1997
} | 59 |
# PicoDet OpenVINO Demo
This folder provides PicoDet inference code using
[Intel's OpenVINO Toolkit](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html). Most of the implementation in this folder is the same as *demo_ncnn*.
**Recommended**: install OpenVINO from the tarball rather than from GitHub, [link](https://registrationcenter-download.intel.com/akdlm/irc_nas/18096/l_openvino_toolkit_p_2021.4.689.tgz).
## Install OpenVINO Toolkit
Go to [OpenVINO HomePage](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html)
Download a suitable version and install.
Follow the official Get Started Guides: https://docs.openvinotoolkit.org/latest/get_started_guides.html
## Set the Environment Variables
### Windows:
Run this command in cmd. (Every time before using OpenVINO)
```cmd
<INSTALL_DIR>\openvino_2021\bin\setupvars.bat
```
Or set the system environment variables once and for all:
Name |Value
:--------------------:|:--------:
INTEL_OPENVINO_DIR | <INSTALL_DIR>\openvino_2021
INTEL_CVSDK_DIR | %INTEL_OPENVINO_DIR%
InferenceEngine_DIR | %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\share
HDDL_INSTALL_DIR | %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\hddl
ngraph_DIR | %INTEL_OPENVINO_DIR%\deployment_tools\ngraph\cmake
And add this to ```Path```
```
%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Debug;%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Release;%HDDL_INSTALL_DIR%\bin;%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb\bin;%INTEL_OPENVINO_DIR%\deployment_tools\ngraph\lib
```
### Linux
Run this command in shell. (Every time before using OpenVINO)
```shell
source /opt/intel/openvino_2021/bin/setupvars.sh
```
Or edit .bashrc
```shell
vi ~/.bashrc
```
Add this line to the end of the file
```shell
source /opt/intel/openvino_2021/bin/setupvars.sh
```
## Convert model
Convert to OpenVINO
``` shell
cd <INSTALL_DIR>/openvino_2021/deployment_tools/model_optimizer
```
Install requirements for convert tool
```shell
cd ./install_prerequisites
sudo ./install_prerequisites_onnx.sh
```
Then convert the model. Notice: mean_values and scale_values should be the same as the training settings in your YAML config file.
```shell
python3 mo_onnx.py --input_model <ONNX_MODEL> --mean_values [103.53,116.28,123.675] --scale_values [57.375,57.12,58.395]
```
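After conversion, you can quickly sanity-check the IR from Python before building the C++ demo. The snippet below is a minimal, illustrative sketch using the OpenVINO 2021 Python API; the IR file names follow the demo's `weight` folder convention, the test image name is a placeholder, and preprocessing (mean/scale) is already folded into the IR by the converter flags above:
```python
import cv2
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="picodet_m_416.xml", weights="picodet_m_416.bin")
exec_net = ie.load_network(network=net, device_name="CPU")

input_name = next(iter(net.input_info))
_, _, h, w = net.input_info[input_name].input_data.shape   # NCHW layout

img = cv2.imread("test.jpg")                                # placeholder image
blob = cv2.resize(img, (w, h)).transpose(2, 0, 1)[np.newaxis].astype(np.float32)
outputs = exec_net.infer({input_name: blob})                # raw head outputs
print({name: out.shape for name, out in outputs.items()})   # decode/NMS is done by the demo
```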
## Build
### Windows
```cmd
<OPENVINO_INSTALL_DIR>\openvino_2021\bin\setupvars.bat
mkdir build
cd build
cmake ..
msbuild picodet_demo.vcxproj /p:configuration=release /p:platform=x64
```
### Linux
```shell
source /opt/intel/openvino_2021/bin/setupvars.sh
mkdir build
cd build
cmake ..
make
```
## Run demo
Download the PicoDet OpenVINO model: [PicoDet OpenVINO model download link](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_openvino.zip).
Move the PicoDet OpenVINO model files to the demo's `weight` folder.
### Edit file
```
step1:
main.cpp
#define image_size 416
...
auto detector = PicoDet("../weight/picodet_m_416.xml");
...
step2:
picodet_openvino.h
#define image_size 416
```
### Webcam
```shell
picodet_demo 0 0
```
### Inference images
```shell
picodet_demo 1 IMAGE_FOLDER/*.jpg
```
### Inference video
```shell
picodet_demo 2 VIDEO_PATH
```
### Benchmark
```shell
picodet_demo 3 0
```
| PaddleDetection/deploy/third_engine/demo_openvino/README.md/0 | {
"file_path": "PaddleDetection/deploy/third_engine/demo_openvino/README.md",
"repo_id": "PaddleDetection",
"token_count": 1310
} | 60 |
[简体中文](./idbased_clas.md) | English
# Development for Action Recognition Based on Classification with Human ID
## Environmental Preparation
The model of action recognition based on classification with human id is trained with [PaddleClas](https://github.com/PaddlePaddle/PaddleClas). Please refer to [Install PaddleClas](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/en/installation/install_paddleclas_en.md) to complete the environment installation for subsequent model training and usage processes.
## Data Preparation
The model of action recognition based on classification with human id directly classifies the image frames of a video, so the training process is the same as that of a usual image classification model.
### Dataset Download
The action recognition of making phone calls is trained on the public dataset [UAV-Human](https://github.com/SUTDCV/UAV-Human). Please fill in the relevant application materials through this link to obtain the download link.
The RGB videos in this dataset are located under the `UAVHuman/ActionRecognition/RGBVideos` path, and the file name of each video carries its annotation information.
### Image Processing for Training and Validation
The `A` field (i.e. action) in the video file name indicates the action type, so the videos containing the action we expect to recognize can be identified directly from their file names.
- Positive sample video: Taking phone calls as an example, we just need to find the file containing `A024`.
- Negative sample video: All videos except the target action.
Since converting video data into images produces a lot of redundancy, for positive sample videos we sample one frame every 8 frames and use the pedestrian detection model to crop it into a half-body image (take the upper half of the detection box, i.e. `img = img[: H/2, :, :]`); an illustrative sketch is given below. The images sampled from positive sample videos are treated as positive samples, and the images sampled from negative sample videos as negative samples.
**Note**: The positive sample videos do not consist entirely of the phone-call action; there are some redundant actions at the beginning and end of each video, which need to be removed.
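The following minimal sketch illustrates the 8-frame sampling and half-body cropping described above (the `detector` interface and file names are placeholders, not part of PaddleDetection):
```python
import cv2

def sample_halfbody_frames(video_path, detector, interval=8):
    """Illustrative only: sample one frame every `interval` frames and keep the
    upper half of each detected pedestrian box. `detector` is a placeholder
    callable returning a list of [x1, y1, x2, y2] boxes for a frame."""
    cap = cv2.VideoCapture(video_path)
    crops, idx = [], 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if idx % interval == 0:
            for x1, y1, x2, y2 in detector(frame):
                person = frame[int(y1):int(y2), int(x1):int(x2)]
                crops.append(person[:person.shape[0] // 2, :, :])  # upper half
        idx += 1
    cap.release()
    return crops
```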
### Preparation for Annotation File
The model of action recognition based on classification with human id is trained with [PaddleClas](https://github.com/PaddlePaddle/PaddleClas). Thus, training a model with this scheme requires preparing the image data and the corresponding annotation files. Please refer to [Image Classification Datasets](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/en/data_preparation/classification_dataset_en.md) to prepare the data. An example of an annotation file is as follows, where `0` and `1` are the corresponding categories of the image:
```
# Each line uses "space" to separate the image path and label
train/000001.jpg 0
train/000002.jpg 0
train/000003.jpg 1
...
```
Additionally, the label file `phone_label_list.txt` helps map category numbers to specific type names:
```
0 make_a_phone_call # type 0
1 normal # type 1
```
After the above files are prepared, place them under the `dataset` directory; the file structure is as follows:
```
data/
├── images # All images
├── phone_label_list.txt # Label file
├── phone_train_list.txt # Training list, including pictures and their corresponding types
└── phone_val_list.txt # Validation list, including pictures and their corresponding types
```
## Model Optimization
### Detection-Tracking Model Optimization
The performance of action recognition based on classification with human id depends on the pre-order detection and tracking models. If the pedestrian location cannot be accurately detected in the actual scene, or it is difficult to correctly assign the person ID between different frames, the performance of the action recognition part will be limited. If you encounter the above problems in actual use, please refer to [Secondary Development of Detection Task](../detection_en.md) and [Secondary Development of Multi-target Tracking Task](../pphuman_mot_en.md) for detection/track model optimization.
### Half-Body Prediction
For the phone-call action, classification can be achieved from the upper-body image alone. Therefore, during both training and prediction, the half-body image is used instead of the full-body pedestrian image.
## Add New Action
### Data Preparation
Referring to the previous introduction, complete the data preparation part and place it under `{root of PaddleClas}/dataset`:
```
data/
├── images # All images
├── label_list.txt # Label file
├── train_list.txt # Training list, including pictures and their corresponding types
└── val_list.txt # Validation list, including pictures and their corresponding types
```
Where the training list and validation list files are as follows:
```
# Each line uses "space" to separate the image path and label
train/000001.jpg 0
train/000002.jpg 0
train/000003.jpg 1
train/000004.jpg 2 # For the newly added categories, simply fill in the corresponding category number.
...
```
`label_list.txt` should give the names of the extended types:
```
0 make_a_phone_call # class 0
1 Your New Action # class 1
...
n normal # class n
```
### Configuration File Settings
The [training configuration file](https://github.com/PaddlePaddle/PaddleClas/blob/develop/ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml) has been integrated into PaddleClas. The settings that need attention are as follows:
```yaml
# model architecture
Arch:
name: PPHGNet_tiny
class_num: 2 # Corresponding to the number of action categories
...
# Please correctly set image_root and cls_label_path to ensure that the image_root + image path in cls_label_path can access the image correctly
DataLoader:
Train:
dataset:
name: ImageNetDataset
image_root: ./dataset/
cls_label_path: ./dataset/phone_train_list_halfbody.txt
...
Infer:
infer_imgs: docs/images/inference_deployment/whl_demo.jpg
batch_size: 1
transforms:
- DecodeImage:
to_rgb: True
channel_first: False
- ResizeImage:
size: 224
- NormalizeImage:
scale: 1.0/255.0
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
order: ''
- ToCHWImage:
PostProcess:
name: Topk
topk: 2 # Display the number of topks, do not exceed the total number of categories
class_id_map_file: dataset/phone_label_list.txt # path of label_list.txt
```
### Model Training And Evaluation
#### Model Training
Start training with the following command:
```bash
export CUDA_VISIBLE_DEVICES=0,1,2,3
python3 -m paddle.distributed.launch \
--gpus="0,1,2,3" \
tools/train.py \
-c ./ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml \
-o Arch.pretrained=True
```
where `Arch.pretrained=True` makes training start from pretrained weights, which helps convergence.
#### Model Evaluation
After training the model, use the following command to evaluate the model metrics.
```bash
python3 tools/eval.py \
-c ./ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml \
-o Global.pretrained_model=output/PPHGNet_tiny/best_model
```
Where `-o Global.pretrained_model="output/PPHGNet_tiny/best_model"` specifies the path where the current best weight is located. If other weights are needed, just replace the corresponding path.
#### Model Export
For the detailed introduction of model export, please refer to [here](https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/en/inference_deployment/export_model_en.md#2-export-classification-model)
You can refer to the following steps:
```bash
python tools/export_model.py \
    -c ./ppcls/configs/practical_models/PPHGNet_tiny_calling_halfbody.yaml \
    -o Global.pretrained_model=./output/PPHGNet_tiny/best_model \
    -o Global.save_inference_dir=./output_inference/PPHGNet_tiny_calling_halfbody
```
Then rename the exported model and add the configuration file to suit the usage of PP-Human.
```bash
cd ./output_inference/PPHGNet_tiny_calling_halfbody
mv inference.pdiparams model.pdiparams
mv inference.pdiparams.info model.pdiparams.info
mv inference.pdmodel model.pdmodel
# Download configuration file for inference
wget https://bj.bcebos.com/v1/paddledet/models/pipeline/infer_configs/PPHGNet_tiny_calling_halfbody/infer_cfg.yml
```
At this point, this model can be used in PP-Human.
### Custom Action Output
In the model of action recognition based on classification with human id, the task is defined as an image-level classification of each tracked person, and the predicted class is regarded as the action type at the current moment. Therefore, on the basis of completing the training and deployment of the custom model, it is also necessary to convert the classification result into the final action recognition output, and the displayed result of the visualization should be modified accordingly.
Please modify the [postprocessing function](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/deploy/pipeline/pphuman/action_infer.py#L509).
The core code are:
```python
# Get the highest score output of the classification model
cls_id_res = 1
cls_score_res = -1.0
for cls_id in range(len(cls_result[idx])):
score = cls_result[idx][cls_id]
if score > cls_score_res:
cls_id_res = cls_id
cls_score_res = score
    # Currently, class 0 is the positive (target) class and class 1 is the negative class.
if cls_id_res == 1 or (cls_id_res == 0 and
cls_score_res < self.threshold):
# If the classification result is not the target action or its confidence does not reach the threshold,
# determine the action type of the current frame according to the historical results
history_cls, life_remain, history_score = self.result_history.get(
tracker_id, [1, self.frame_life, -1.0])
cls_id_res = history_cls
cls_score_res = 1 - cls_score_res
life_remain -= 1
if life_remain <= 0 and tracker_id in self.result_history:
del (self.result_history[tracker_id])
elif tracker_id in self.result_history:
self.result_history[tracker_id][1] = life_remain
else:
self.result_history[
tracker_id] = [cls_id_res, life_remain, cls_score_res]
else:
# If the classification result belongs to the target action, use the result and record it in the historical result
self.result_history[
tracker_id] = [cls_id_res, self.frame_life, cls_score_res]
...
```
#### Modify Visual Output
At present, ID-based action recognition is displayed based on the results of action recognition and predefined category names. For the detail, please refer to [here](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/deploy/pipeline/pipeline.py#L1024-L1043). If the custom action needs to be modified to another display name, please modify it accordingly to output the corresponding result.
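As an illustrative sketch only (the names below are placeholders; the actual edit should be made in the visualization code linked above), the mapping could look like:
```python
# Placeholder mapping from classification ids to the strings drawn on screen;
# extend it with the display name of your new action.
ACTION_DISPLAY_NAMES = {0: "Calling", 1: "Your New Action"}

def action_display_name(cls_id, default="Normal"):
    return ACTION_DISPLAY_NAMES.get(cls_id, default)
```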
| PaddleDetection/docs/advanced_tutorials/customization/action_recognotion/idbased_clas_en.md/0 | {
"file_path": "PaddleDetection/docs/advanced_tutorials/customization/action_recognotion/idbased_clas_en.md",
"repo_id": "PaddleDetection",
"token_count": 3385
} | 61 |
简体中文 | [English](./ppvehicle_attribute_en.md)
# 车辆属性识别任务二次开发
## 数据准备
### 数据格式
车辆属性模型采用VeRi数据集的属性,共计10种车辆颜色及9种车型, 具体如下:
```
# 车辆颜色
- "yellow"
- "orange"
- "green"
- "gray"
- "red"
- "blue"
- "white"
- "golden"
- "brown"
- "black"
# 车型
- "sedan"
- "suv"
- "van"
- "hatchback"
- "mpv"
- "pickup"
- "bus"
- "truck"
- "estate"
```
在标注文件中使用长度为19的序列来表示上述属性。
举例:
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
前10位中,位序号0的值为1,表示车辆颜色为`"yellow"`。
后9位中,位序号11的值为1,表示车型为`"suv"`。
### 数据标注
理解了上面`数据格式`的含义后,就可以进行数据标注的工作。其本质是:每张车辆的图片,建立一组长度为19的标注项,分别对应各项属性值。
举例:
对于一张原始图片,
1) 使用检测框,标注图片中每台车辆的位置。
2) 每一个检测框(对应每辆车),包含一组19位的属性值数组,数组的每一位以0或1表示。对应上述19个属性分类。例如,如果颜色是'orange',则数组索引为1的位置值为1,如果车型是'sedan',则数组索引为10的位置值为1。
标注完成后利用检测框将每辆车截取成只包含单辆车的图片,则图片与19位属性标注建立了对应关系。也可先截取再进行标注,效果相同。
## 模型训练
数据标注完成后,就可以拿来做模型的训练,完成自定义模型的优化工作。
其主要有两步工作需要完成:1)将数据与标注数据整理成训练格式。2)修改配置文件开始训练。
### 训练数据格式
训练数据包括训练使用的图片和一个训练列表train.txt,其具体位置在训练配置中指定,其放置方式示例如下:
```
Attribute/
|-- data 训练图片文件夹
| |-- 00001.jpg
| |-- 00002.jpg
| `-- 0000x.jpg
`-- train.txt 训练数据列表
```
train.txt文件内为所有训练图片名称(相对于根路径的文件路径)+ 19个标注值
其每一行表示一辆车的图片和标注结果。其格式为:
```
00001.jpg 0,0,1,0,....
```
注意:1)图片与标注值之间是以Tab[\t]符号隔开, 2)标注值之间是以逗号[,]隔开。该格式不能错,否则解析失败。
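下面给出一个示意性片段(仅供参考,图片名与属性取值均为假设),演示如何按上述格式写出 train.txt 中的一行:
```python
# 示例:一辆颜色为 yellow(索引0)、车型为 suv(索引11)的车辆
attrs = [0] * 19
attrs[0] = 1       # yellow
attrs[11] = 1      # suv

line = "00001.jpg" + "\t" + ",".join(str(v) for v in attrs)
with open("train.txt", "a") as f:
    f.write(line + "\n")   # 路径与标注值之间用 Tab 分隔,标注值之间用逗号分隔
```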
### 修改配置开始训练
首先执行以下命令下载训练代码(更多环境问题请参考[Install_PaddleClas](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/en/installation/install_paddleclas_en.md)):
```shell
git clone https://github.com/PaddlePaddle/PaddleClas
```
需要在[配置文件](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml)中,修改的配置项如下:
```yaml
DataLoader:
Train:
dataset:
name: MultiLabelDataset
image_root: "dataset/VeRi/" # the root path of training images
cls_label_path: "dataset/VeRi/train_list.txt" # the location of the training list file
label_ratio: True
transform_ops:
...
Eval:
dataset:
name: MultiLabelDataset
image_root: "dataset/VeRi/" # the root path of evaluation images
cls_label_path: "dataset/VeRi/val_list.txt" # the location of the evaluation list file
label_ratio: True
transform_ops:
...
```
注意:
1. 这里image_root路径+train.txt中图片相对路径,对应图片的完整路径位置。
2. 如果有修改属性数量,则还需修改内容配置项中属性种类数量:
```yaml
# model architecture
Arch:
name: "PPLCNet_x1_0"
pretrained: True
use_ssld: True
class_num: 19 #属性种类数量
```
然后运行以下命令开始训练。
```
#多卡训练
export CUDA_VISIBLE_DEVICES=0,1,2,3
python3 -m paddle.distributed.launch \
--gpus="0,1,2,3" \
tools/train.py \
-c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml
#单卡训练
python3 tools/train.py \
-c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml
```
训练完成后可以执行以下命令进行性能评估:
```
#多卡评估
export CUDA_VISIBLE_DEVICES=0,1,2,3
python3 -m paddle.distributed.launch \
--gpus="0,1,2,3" \
tools/eval.py \
-c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \
-o Global.pretrained_model=./output/PPLCNet_x1_0/best_model
#单卡评估
python3 tools/eval.py \
-c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \
-o Global.pretrained_model=./output/PPLCNet_x1_0/best_model
```
### 模型导出
使用下述命令将训练好的模型导出为预测部署模型。
```
python3 tools/export_model.py \
-c ./ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml \
-o Global.pretrained_model=output/PPLCNet_x1_0/best_model \
-o Global.save_inference_dir=deploy/models/PPLCNet_x1_0_vehicle_attribute_model
```
导出模型后,如果希望在PP-Vehicle中使用,则需要下载[预测部署模型](https://bj.bcebos.com/v1/paddledet/models/pipeline/vehicle_attribute_model.zip),解压并将其中的配置文件`infer_cfg.yml`文件,放置到导出的模型文件夹`PPLCNet_x1_0_vehicle_attribute_model`中。
使用时在PP-Vehicle中的配置文件`./deploy/pipeline/config/infer_cfg_ppvehicle.yml`中修改新的模型路径`model_dir`项,并开启功能`enable: True`。
```
VEHICLE_ATTR:
model_dir: [YOUR_DEPLOY_MODEL_DIR]/PPLCNet_x1_0_vehicle_attribute_infer/ #新导出的模型路径位置
enable: True #开启功能
```
完成上述配置后,即可在PP-Vehicle中使用新模型。至此即完成新增属性类别识别任务。
## 属性增减
该过程与行人属性的增减过程相似,如果需要增加、减少属性数量,则需要:
1)标注时需增加新属性类别信息或删减属性类别信息;
2)对应修改训练中train.txt所使用的属性数量和名称;
3)修改训练配置,例如``PaddleClas/blob/develop/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml``文件中的属性数量,详细见上述`修改配置开始训练`部分。
增加属性示例:
1. 在标注数据时在19位后继续增加新的属性标注数值;
2. 在train.txt文件的标注数值中也增加新的属性数值。
3. 注意属性类型在train.txt中属性数值列表中的位置的对应关系需要固定。
<div width="500" align="center">
<img src="../../images/add_attribute.png"/>
</div>
删减属性同理。
## 修改后处理代码
修改了属性定义后,pipeline后处理部分也需要做相应修改,主要影响结果可视化时的显示结果。
相应代码在[文件](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/deploy/pipeline/ppvehicle/vehicle_attr.py#L108)中`postprocess`函数。
其函数实现说明如下:
```python
# 在类的初始化函数中,定义了颜色/车型的名称
self.color_list = [
"yellow", "orange", "green", "gray", "red", "blue", "white",
"golden", "brown", "black"
]
self.type_list = [
"sedan", "suv", "van", "hatchback", "mpv", "pickup", "bus", "truck",
"estate"
]
...
def postprocess(self, inputs, result):
# postprocess output of predictor
im_results = result['output']
batch_res = []
for res in im_results:
res = res.tolist()
attr_res = []
color_res_str = "Color: "
type_res_str = "Type: "
color_idx = np.argmax(res[:10]) # 前10项表示各项颜色得分,取得分最大项作为颜色结果
type_idx = np.argmax(res[10:]) # 后9项表示各项车型得分,取得分最大项作为车型结果
# 颜色和车型的得分都需要超过对应阈值,否则视为'UnKnown'
if res[color_idx] >= self.color_threshold:
color_res_str += self.color_list[color_idx]
else:
color_res_str += "Unknown"
attr_res.append(color_res_str)
if res[type_idx + 10] >= self.type_threshold:
type_res_str += self.type_list[type_idx]
else:
type_res_str += "Unknown"
attr_res.append(type_res_str)
batch_res.append(attr_res)
result = {'output': batch_res}
return result
```
| PaddleDetection/docs/advanced_tutorials/customization/ppvehicle_attribute.md/0 | {
"file_path": "PaddleDetection/docs/advanced_tutorials/customization/ppvehicle_attribute.md",
"repo_id": "PaddleDetection",
"token_count": 5312
} | 62 |
# 目标检测热力图
## 1.简介
基于backbone/roi特征图计算物体预测框的cam(类激活图), 目前支持基于FasterRCNN/MaskRCNN系列, PPYOLOE系列, 以及BlazeFace, SSD, Retinanet网络。
## 2.使用方法
* 以PP-YOLOE为例,准备好数据之后,指定网络配置文件、模型权重地址和图片路径以及输出文件夹路径,使用脚本调用tools/cam_ppdet.py计算图片中物体预测框的grad_cam热力图。下面为运行脚本示例。
```shell
python tools/cam_ppdet.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --infer_img demo/000000014439.jpg --cam_out cam_ppyoloe --target_feature_layer_name model.backbone -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams
```
* **参数**
| FLAG | 用途 |
|:--------------------------:|:--------------------------------------------------------------------------------------------------------------------------:|
| -c | 指定配置文件 |
| --infer_img | 用于预测的图片路径 |
| --cam_out | 指定输出路径 |
| --target_feature_layer_name | 计算cam的特征图位置, 如model.backbone、 model.bbox_head.roi_extractor |
| -o | 设置或更改配置文件里的参数内容, 如 -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams |
* 运行效果
<center>
<img src="../images/grad_cam_ppyoloe_demo.jpg" width="500" >
</center>
<br><center>cam_ppyoloe/225.jpg</center></br>
## 3. 目前支持基于FasterRCNN/MaskRCNN系列, PPYOLOE系列以及BlazeFace, SSD, Retinanet网络。
* PPYOLOE网络热图可视化脚本
```bash
python tools/cam_ppdet.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml --infer_img demo/000000014439.jpg --cam_out cam_ppyoloe --target_feature_layer_name model.backbone -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams
```
* MaskRCNN网络roi特征热图可视化脚本
```bash
python tools/cam_ppdet.py -c configs/mask_rcnn/mask_rcnn_r50_vd_fpn_2x_coco.yml --infer_img demo/000000014439.jpg --cam_out cam_mask_rcnn_roi --target_feature_layer_name model.bbox_head.roi_extractor -o weights=https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_vd_fpn_2x_coco.pdparams
```
* MaskRCNN网络backbone特征的热图可视化脚本
```bash
python tools/cam_ppdet.py -c configs/mask_rcnn/mask_rcnn_r50_vd_fpn_2x_coco.yml --infer_img demo/000000014439.jpg --cam_out cam_mask_rcnn_backbone --target_feature_layer_name model.backbone -o weights=https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_vd_fpn_2x_coco.pdparams
```
* FasterRCNN网络基于roi特征的热图可视化脚本
```bash
python tools/cam_ppdet.py -c configs/faster_rcnn/faster_rcnn_r50_vd_fpn_2x_coco.yml --infer_img demo/000000014439.jpg --cam_out cam_faster_rcnn_roi --target_feature_layer_name model.bbox_head.roi_extractor -o weights=https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_vd_fpn_ssld_2x_coco.pdparams
```
* FasterRCNN网络基于backbone特征的热图可视化脚本
```bash
python tools/cam_ppdet.py -c configs/faster_rcnn/faster_rcnn_r50_vd_fpn_2x_coco.yml --infer_img demo/000000014439.jpg --cam_out cam_faster_rcnn_backbone --target_feature_layer_name model.backbone -o weights=https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_vd_fpn_ssld_2x_coco.pdparams
```
* BlaczeFace网络backbone特征热图可视化脚本
```bash
python tools/cam_ppdet.py -c configs/face_detection/blazeface_1000e.yml --infer_img demo/hrnet_demo.jpg --cam_out cam_blazeface --target_feature_layer_name model.backbone -o weights=https://paddledet.bj.bcebos.com/models/blazeface_1000e.pdparams
```
* SSD网络backbone特征热图可视化脚本
```bash
python tools/cam_ppdet.py -c configs/ssd/ssd_mobilenet_v1_300_120e_voc.yml --infer_img demo/000000014439.jpg --cam_out cam_ssd --target_feature_layer_name model.backbone -o weights=https://paddledet.bj.bcebos.com/models/ssd_mobilenet_v1_300_120e_voc.pdparams
```
* Retinanet网络backbone特征热图可视化脚本
```bash
python tools/cam_ppdet.py -c configs/retinanet/retinanet_r50_fpn_2x_coco.yml --infer_img demo/000000014439.jpg --cam_out cam_retinanet --target_feature_layer_name model.backbone -o weights=https://bj.bcebos.com/v1/paddledet/models/retinanet_r50_fpn_2x_coco.pdparams
```
| PaddleDetection/docs/tutorials/GradCAM_cn.md/0 | {
"file_path": "PaddleDetection/docs/tutorials/GradCAM_cn.md",
"repo_id": "PaddleDetection",
"token_count": 2799
} | 63 |
简体中文 | [English](KeyPointAnnoTools_en.md)
# 关键点检测标注工具
## 目录
[LabelMe](#LabelMe)
- [使用说明](#使用说明)
- [安装](#安装)
- [关键点数据说明](#关键点数据说明)
- [图片标注过程](#图片标注过程)
- [标注格式](#标注格式)
- [导出数据格式](#导出数据格式)
- [格式转化总结](#格式转化总结)
- [标注文件(json)-->COCO](#标注文件(json)-->COCO数据集)
## [LabelMe](https://github.com/wkentaro/labelme)
### 使用说明
#### 安装
具体安装操作请参考[LabelMe官方教程](https://github.com/wkentaro/labelme)中的Installation
<details>
<summary><b> Ubuntu</b></summary>
```
sudo apt-get install labelme
# or
sudo pip3 install labelme
# or install standalone executable from:
# https://github.com/wkentaro/labelme/releases
```
</details>
<details>
<summary><b> macOS</b></summary>
```
brew install pyqt # maybe pyqt5
pip install labelme
# or
brew install wkentaro/labelme/labelme # command line interface
# brew install --cask wkentaro/labelme/labelme # app
# or install standalone executable/app from:
# https://github.com/wkentaro/labelme/releases
```
</details>
推荐使用Anaconda的安装方式
```
conda create --name=labelme python=3
conda activate labelme
pip install pyqt5
pip install labelme
```
#### 关键点数据说明
以COCO数据集为例,共需采集17个关键点
```
keypoint indexes:
0: 'nose',
1: 'left_eye',
2: 'right_eye',
3: 'left_ear',
4: 'right_ear',
5: 'left_shoulder',
6: 'right_shoulder',
7: 'left_elbow',
8: 'right_elbow',
9: 'left_wrist',
10: 'right_wrist',
11: 'left_hip',
12: 'right_hip',
13: 'left_knee',
14: 'right_knee',
15: 'left_ankle',
16: 'right_ankle'
```
#### 图片标注过程
启动labelme后,选择图片文件或者图片所在文件夹
左侧编辑栏选择`create polygons` ,右击图像区域选择标注形状,绘制好关键点后按下回车,弹出新的框填入标注关键点对应的标签
左侧菜单栏点击保存,生成`json`形式的**标注文件**
![操作说明](https://user-images.githubusercontent.com/34162360/178250648-29ee781a-676b-419c-83b1-de1e4e490526.gif)
### 标注格式
#### 导出数据格式
```
#生成标注文件
png/jpeg/jpg-->labelme标注-->json
```
#### 格式转化总结
```
#标注文件转化为COCO数据集格式
json-->labelme2coco.py-->COCO数据集
```
#### 标注文件(json)-->COCO数据集
使用[PaddleDetection提供的x2coco.py](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/tools/x2coco.py) 将labelme标注的数据转换为COCO数据集形式
```bash
python tools/x2coco.py \
--dataset_type labelme \
--json_input_dir ./labelme_annos/ \
--image_input_dir ./labelme_imgs/ \
--output_dir ./cocome/ \
--train_proportion 0.8 \
--val_proportion 0.2 \
--test_proportion 0.0
```
用户数据集转成COCO数据后目录结构如下(注意数据集中路径名、文件名尽量不要使用中文,避免中文编码问题导致出错):
```
dataset/xxx/
├── annotations
│ ├── train.json # coco数据的标注文件
│ ├── valid.json # coco数据的标注文件
├── images
│ ├── xxx1.jpg
│ ├── xxx2.jpg
│ ├── xxx3.jpg
│ | ...
...
```
| PaddleDetection/docs/tutorials/data/KeyPointAnnoTools.md/0 | {
"file_path": "PaddleDetection/docs/tutorials/data/KeyPointAnnoTools.md",
"repo_id": "PaddleDetection",
"token_count": 1989
} | 64 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
this code is base on https://github.com/open-mmlab/mmpose
"""
import os
import cv2
import numpy as np
import json
import copy
import pycocotools
from pycocotools.coco import COCO
from .dataset import DetDataset
from ppdet.core.workspace import register, serializable
@serializable
class KeypointBottomUpBaseDataset(DetDataset):
"""Base class for bottom-up datasets.
All datasets should subclass it.
All subclasses should overwrite:
Methods:`_get_imganno`
Args:
dataset_dir (str): Root path to the dataset.
anno_path (str): Relative path to the annotation file.
image_dir (str): Path to a directory where images are held.
Default: None.
num_joints (int): keypoint numbers
transform (composed(operators)): A sequence of data transforms.
shard (list): [rank, worldsize], the distributed env params
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
dataset_dir,
image_dir,
anno_path,
num_joints,
transform=[],
shard=[0, 1],
test_mode=False):
super().__init__(dataset_dir, image_dir, anno_path)
self.image_info = {}
self.ann_info = {}
self.img_prefix = os.path.join(dataset_dir, image_dir)
self.transform = transform
self.test_mode = test_mode
self.ann_info['num_joints'] = num_joints
self.img_ids = []
def parse_dataset(self):
pass
def __len__(self):
"""Get dataset length."""
return len(self.img_ids)
def _get_imganno(self, idx):
"""Get anno for a single image."""
raise NotImplementedError
def __getitem__(self, idx):
"""Prepare image for training given the index."""
records = copy.deepcopy(self._get_imganno(idx))
records['image'] = cv2.imread(records['image_file'])
records['image'] = cv2.cvtColor(records['image'], cv2.COLOR_BGR2RGB)
if 'mask' in records:
records['mask'] = (records['mask'] + 0).astype('uint8')
records = self.transform(records)
return records
@register
@serializable
class KeypointBottomUpCocoDataset(KeypointBottomUpBaseDataset):
"""COCO dataset for bottom-up pose estimation.
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
COCO keypoint indexes::
0: 'nose',
1: 'left_eye',
2: 'right_eye',
3: 'left_ear',
4: 'right_ear',
5: 'left_shoulder',
6: 'right_shoulder',
7: 'left_elbow',
8: 'right_elbow',
9: 'left_wrist',
10: 'right_wrist',
11: 'left_hip',
12: 'right_hip',
13: 'left_knee',
14: 'right_knee',
15: 'left_ankle',
16: 'right_ankle'
Args:
dataset_dir (str): Root path to the dataset.
anno_path (str): Relative path to the annotation file.
image_dir (str): Path to a directory where images are held.
Default: None.
num_joints (int): keypoint numbers
transform (composed(operators)): A sequence of data transforms.
shard (list): [rank, worldsize], the distributed env params
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
dataset_dir,
image_dir,
anno_path,
num_joints,
transform=[],
shard=[0, 1],
test_mode=False,
return_mask=True,
return_bbox=True,
return_area=True,
return_class=True):
super().__init__(dataset_dir, image_dir, anno_path, num_joints,
transform, shard, test_mode)
self.ann_file = os.path.join(dataset_dir, anno_path)
self.shard = shard
self.test_mode = test_mode
self.return_mask = return_mask
self.return_bbox = return_bbox
self.return_area = return_area
self.return_class = return_class
def parse_dataset(self):
self.coco = COCO(self.ann_file)
self.img_ids = self.coco.getImgIds()
if not self.test_mode:
self.img_ids_tmp = []
for img_id in self.img_ids:
ann_ids = self.coco.getAnnIds(imgIds=img_id)
anno = self.coco.loadAnns(ann_ids)
anno = [obj for obj in anno if obj['iscrowd'] == 0]
if len(anno) == 0:
continue
self.img_ids_tmp.append(img_id)
self.img_ids = self.img_ids_tmp
blocknum = int(len(self.img_ids) / self.shard[1])
self.img_ids = self.img_ids[(blocknum * self.shard[0]):(blocknum * (
self.shard[0] + 1))]
self.num_images = len(self.img_ids)
self.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)
self.dataset_name = 'coco'
cat_ids = self.coco.getCatIds()
self.catid2clsid = dict({catid: i for i, catid in enumerate(cat_ids)})
print('=> num_images: {}'.format(self.num_images))
@staticmethod
def _get_mapping_id_name(imgs):
"""
Args:
imgs (dict): dict of image info.
Returns:
tuple: Image name & id mapping dicts.
- id2name (dict): Mapping image id to name.
- name2id (dict): Mapping image name to id.
"""
id2name = {}
name2id = {}
for image_id, image in imgs.items():
file_name = image['file_name']
id2name[image_id] = file_name
name2id[file_name] = image_id
return id2name, name2id
def _get_imganno(self, idx):
"""Get anno for a single image.
Args:
idx (int): image idx
Returns:
dict: info for model training
"""
coco = self.coco
img_id = self.img_ids[idx]
ann_ids = coco.getAnnIds(imgIds=img_id)
anno = coco.loadAnns(ann_ids)
anno = [
obj for obj in anno
if obj['iscrowd'] == 0 and obj['num_keypoints'] > 0
]
db_rec = {}
joints, orgsize = self._get_joints(anno, idx)
db_rec['gt_joints'] = joints
db_rec['im_shape'] = orgsize
if self.return_bbox:
db_rec['gt_bbox'] = self._get_bboxs(anno, idx)
if self.return_class:
db_rec['gt_class'] = self._get_labels(anno, idx)
if self.return_area:
db_rec['gt_areas'] = self._get_areas(anno, idx)
if self.return_mask:
db_rec['mask'] = self._get_mask(anno, idx)
db_rec['im_id'] = img_id
db_rec['image_file'] = os.path.join(self.img_prefix,
self.id2name[img_id])
return db_rec
def _get_joints(self, anno, idx):
"""Get joints for all people in an image."""
num_people = len(anno)
joints = np.zeros(
(num_people, self.ann_info['num_joints'], 3), dtype=np.float32)
for i, obj in enumerate(anno):
joints[i, :self.ann_info['num_joints'], :3] = \
np.array(obj['keypoints']).reshape([-1, 3])
img_info = self.coco.loadImgs(self.img_ids[idx])[0]
orgsize = np.array([img_info['height'], img_info['width'], 1])
return joints, orgsize
def _get_bboxs(self, anno, idx):
num_people = len(anno)
gt_bboxes = np.zeros((num_people, 4), dtype=np.float32)
for idx, obj in enumerate(anno):
if 'bbox' in obj:
gt_bboxes[idx, :] = obj['bbox']
gt_bboxes[:, 2] += gt_bboxes[:, 0]
gt_bboxes[:, 3] += gt_bboxes[:, 1]
return gt_bboxes
def _get_labels(self, anno, idx):
num_people = len(anno)
gt_labels = np.zeros((num_people, 1), dtype=np.float32)
for idx, obj in enumerate(anno):
if 'category_id' in obj:
catid = obj['category_id']
gt_labels[idx, 0] = self.catid2clsid[catid]
return gt_labels
def _get_areas(self, anno, idx):
num_people = len(anno)
gt_areas = np.zeros((num_people, ), dtype=np.float32)
for idx, obj in enumerate(anno):
if 'area' in obj:
gt_areas[idx, ] = obj['area']
return gt_areas
def _get_mask(self, anno, idx):
"""Get ignore masks to mask out losses."""
coco = self.coco
img_info = coco.loadImgs(self.img_ids[idx])[0]
m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)
for obj in anno:
if 'segmentation' in obj:
if obj['iscrowd']:
rle = pycocotools.mask.frPyObjects(obj['segmentation'],
img_info['height'],
img_info['width'])
m += pycocotools.mask.decode(rle)
elif obj['num_keypoints'] == 0:
rles = pycocotools.mask.frPyObjects(obj['segmentation'],
img_info['height'],
img_info['width'])
for rle in rles:
m += pycocotools.mask.decode(rle)
return m < 0.5
@register
@serializable
class KeypointBottomUpCrowdPoseDataset(KeypointBottomUpCocoDataset):
"""CrowdPose dataset for bottom-up pose estimation.
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
CrowdPose keypoint indexes::
0: 'left_shoulder',
1: 'right_shoulder',
2: 'left_elbow',
3: 'right_elbow',
4: 'left_wrist',
5: 'right_wrist',
6: 'left_hip',
7: 'right_hip',
8: 'left_knee',
9: 'right_knee',
10: 'left_ankle',
11: 'right_ankle',
12: 'top_head',
13: 'neck'
Args:
dataset_dir (str): Root path to the dataset.
anno_path (str): Relative path to the annotation file.
image_dir (str): Path to a directory where images are held.
Default: None.
num_joints (int): keypoint numbers
transform (composed(operators)): A sequence of data transforms.
shard (list): [rank, worldsize], the distributed env params
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
dataset_dir,
image_dir,
anno_path,
num_joints,
transform=[],
shard=[0, 1],
test_mode=False):
super().__init__(dataset_dir, image_dir, anno_path, num_joints,
transform, shard, test_mode)
self.ann_file = os.path.join(dataset_dir, anno_path)
self.shard = shard
self.test_mode = test_mode
def parse_dataset(self):
self.coco = COCO(self.ann_file)
self.img_ids = self.coco.getImgIds()
if not self.test_mode:
self.img_ids = [
img_id for img_id in self.img_ids
if len(self.coco.getAnnIds(
imgIds=img_id, iscrowd=None)) > 0
]
blocknum = int(len(self.img_ids) / self.shard[1])
self.img_ids = self.img_ids[(blocknum * self.shard[0]):(blocknum * (
self.shard[0] + 1))]
self.num_images = len(self.img_ids)
self.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)
self.dataset_name = 'crowdpose'
print('=> num_images: {}'.format(self.num_images))
@serializable
class KeypointTopDownBaseDataset(DetDataset):
"""Base class for top_down datasets.
All datasets should subclass it.
All subclasses should overwrite:
Methods:`_get_db`
Args:
dataset_dir (str): Root path to the dataset.
image_dir (str): Path to a directory where images are held.
anno_path (str): Relative path to the annotation file.
num_joints (int): keypoint numbers
transform (composed(operators)): A sequence of data transforms.
"""
def __init__(self,
dataset_dir,
image_dir,
anno_path,
num_joints,
transform=[]):
super().__init__(dataset_dir, image_dir, anno_path)
self.image_info = {}
self.ann_info = {}
self.img_prefix = os.path.join(dataset_dir, image_dir)
self.transform = transform
self.ann_info['num_joints'] = num_joints
self.db = []
def __len__(self):
"""Get dataset length."""
return len(self.db)
def _get_db(self):
"""Get a sample"""
raise NotImplementedError
def __getitem__(self, idx):
"""Prepare sample for training given the index."""
records = copy.deepcopy(self.db[idx])
records['image'] = cv2.imread(records['image_file'], cv2.IMREAD_COLOR |
cv2.IMREAD_IGNORE_ORIENTATION)
records['image'] = cv2.cvtColor(records['image'], cv2.COLOR_BGR2RGB)
records['score'] = records['score'] if 'score' in records else 1
records = self.transform(records)
# print('records', records)
return records
@register
@serializable
class KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):
"""COCO dataset for top-down pose estimation.
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
COCO keypoint indexes:
0: 'nose',
1: 'left_eye',
2: 'right_eye',
3: 'left_ear',
4: 'right_ear',
5: 'left_shoulder',
6: 'right_shoulder',
7: 'left_elbow',
8: 'right_elbow',
9: 'left_wrist',
10: 'right_wrist',
11: 'left_hip',
12: 'right_hip',
13: 'left_knee',
14: 'right_knee',
15: 'left_ankle',
16: 'right_ankle'
Args:
dataset_dir (str): Root path to the dataset.
image_dir (str): Path to a directory where images are held.
anno_path (str): Relative path to the annotation file.
num_joints (int): Keypoint numbers
trainsize (list):[w, h] Image target size
transform (composed(operators)): A sequence of data transforms.
bbox_file (str): Path to a detection bbox file
Default: None.
use_gt_bbox (bool): Whether to use ground truth bbox
Default: True.
pixel_std (int): The pixel std of the scale
Default: 200.
image_thre (float): The threshold to filter the detection box
Default: 0.0.
"""
def __init__(self,
dataset_dir,
image_dir,
anno_path,
num_joints,
trainsize,
transform=[],
bbox_file=None,
use_gt_bbox=True,
pixel_std=200,
image_thre=0.0,
center_scale=None):
super().__init__(dataset_dir, image_dir, anno_path, num_joints,
transform)
self.bbox_file = bbox_file
self.use_gt_bbox = use_gt_bbox
self.trainsize = trainsize
self.pixel_std = pixel_std
self.image_thre = image_thre
self.center_scale = center_scale
self.dataset_name = 'coco'
def parse_dataset(self):
if self.use_gt_bbox:
self.db = self._load_coco_keypoint_annotations()
else:
self.db = self._load_coco_person_detection_results()
def _load_coco_keypoint_annotations(self):
coco = COCO(self.get_anno())
img_ids = coco.getImgIds()
gt_db = []
for index in img_ids:
im_ann = coco.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
file_name = im_ann['file_name']
im_id = int(im_ann["id"])
annIds = coco.getAnnIds(imgIds=index, iscrowd=False)
objs = coco.loadAnns(annIds)
valid_objs = []
for obj in objs:
x, y, w, h = obj['bbox']
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]
valid_objs.append(obj)
objs = valid_objs
rec = []
for obj in objs:
if max(obj['keypoints']) == 0:
continue
joints = np.zeros(
(self.ann_info['num_joints'], 3), dtype=np.float32)
joints_vis = np.zeros(
(self.ann_info['num_joints'], 3), dtype=np.float32)
for ipt in range(self.ann_info['num_joints']):
joints[ipt, 0] = obj['keypoints'][ipt * 3 + 0]
joints[ipt, 1] = obj['keypoints'][ipt * 3 + 1]
joints[ipt, 2] = 0
t_vis = obj['keypoints'][ipt * 3 + 2]
if t_vis > 1:
t_vis = 1
joints_vis[ipt, 0] = t_vis
joints_vis[ipt, 1] = t_vis
joints_vis[ipt, 2] = 0
center, scale = self._box2cs(obj['clean_bbox'][:4])
rec.append({
'image_file': os.path.join(self.img_prefix, file_name),
'center': center,
'scale': scale,
'gt_joints': joints,
'joints_vis': joints_vis,
'im_id': im_id,
})
gt_db.extend(rec)
return gt_db
def _box2cs(self, box):
x, y, w, h = box[:4]
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
aspect_ratio = self.trainsize[0] * 1.0 / self.trainsize[1]
if self.center_scale is not None and np.random.rand() < 0.3:
center += self.center_scale * (np.random.rand(2) - 0.5) * [w, h]
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],
dtype=np.float32)
if center[0] != -1:
scale = scale * 1.25
return center, scale
def _load_coco_person_detection_results(self):
all_boxes = None
bbox_file_path = os.path.join(self.dataset_dir, self.bbox_file)
with open(bbox_file_path, 'r') as f:
all_boxes = json.load(f)
if not all_boxes:
print('=> Load %s fail!' % bbox_file_path)
return None
kpt_db = []
for n_img in range(0, len(all_boxes)):
det_res = all_boxes[n_img]
if det_res['category_id'] != 1:
continue
file_name = det_res[
'filename'] if 'filename' in det_res else '%012d.jpg' % det_res[
'image_id']
img_name = os.path.join(self.img_prefix, file_name)
box = det_res['bbox']
score = det_res['score']
im_id = int(det_res['image_id'])
if score < self.image_thre:
continue
center, scale = self._box2cs(box)
joints = np.zeros(
(self.ann_info['num_joints'], 3), dtype=np.float32)
joints_vis = np.ones(
(self.ann_info['num_joints'], 3), dtype=np.float32)
kpt_db.append({
'image_file': img_name,
'im_id': im_id,
'center': center,
'scale': scale,
'score': score,
'gt_joints': joints,
'joints_vis': joints_vis,
})
return kpt_db
@register
@serializable
class KeypointTopDownCocoWholeBodyHandDataset(KeypointTopDownBaseDataset):
"""CocoWholeBody dataset for top-down hand pose estimation.
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
COCO-WholeBody Hand keypoint indexes:
0: 'wrist',
1: 'thumb1',
2: 'thumb2',
3: 'thumb3',
4: 'thumb4',
5: 'forefinger1',
6: 'forefinger2',
7: 'forefinger3',
8: 'forefinger4',
9: 'middle_finger1',
10: 'middle_finger2',
11: 'middle_finger3',
12: 'middle_finger4',
13: 'ring_finger1',
14: 'ring_finger2',
15: 'ring_finger3',
16: 'ring_finger4',
17: 'pinky_finger1',
18: 'pinky_finger2',
19: 'pinky_finger3',
20: 'pinky_finger4'
Args:
dataset_dir (str): Root path to the dataset.
image_dir (str): Path to a directory where images are held.
anno_path (str): Relative path to the annotation file.
num_joints (int): Keypoint numbers
trainsize (list):[w, h] Image target size
transform (composed(operators)): A sequence of data transforms.
pixel_std (int): The pixel std of the scale
Default: 200.
"""
def __init__(self,
dataset_dir,
image_dir,
anno_path,
num_joints,
trainsize,
transform=[],
pixel_std=200):
super().__init__(dataset_dir, image_dir, anno_path, num_joints,
transform)
self.trainsize = trainsize
self.pixel_std = pixel_std
self.dataset_name = 'coco_wholebady_hand'
def _box2cs(self, box):
x, y, w, h = box[:4]
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
aspect_ratio = self.trainsize[0] * 1.0 / self.trainsize[1]
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],
dtype=np.float32)
if center[0] != -1:
scale = scale * 1.25
return center, scale
def parse_dataset(self):
gt_db = []
num_joints = self.ann_info['num_joints']
coco = COCO(self.get_anno())
img_ids = list(coco.imgs.keys())
for img_id in img_ids:
im_ann = coco.loadImgs(img_id)[0]
image_file = os.path.join(self.img_prefix, im_ann['file_name'])
im_id = int(im_ann["id"])
ann_ids = coco.getAnnIds(imgIds=img_id, iscrowd=False)
objs = coco.loadAnns(ann_ids)
for obj in objs:
for type in ['left', 'right']:
if (obj[f'{type}hand_valid'] and
max(obj[f'{type}hand_kpts']) > 0):
joints = np.zeros((num_joints, 3), dtype=np.float32)
joints_vis = np.zeros((num_joints, 3), dtype=np.float32)
keypoints = np.array(obj[f'{type}hand_kpts'])
keypoints = keypoints.reshape(-1, 3)
joints[:, :2] = keypoints[:, :2]
joints_vis[:, :2] = np.minimum(1, keypoints[:, 2:3])
center, scale = self._box2cs(obj[f'{type}hand_box'][:4])
gt_db.append({
'image_file': image_file,
'center': center,
'scale': scale,
'gt_joints': joints,
'joints_vis': joints_vis,
'im_id': im_id,
})
self.db = gt_db
@register
@serializable
class KeypointTopDownMPIIDataset(KeypointTopDownBaseDataset):
"""MPII dataset for topdown pose estimation.
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
MPII keypoint indexes::
0: 'right_ankle',
1: 'right_knee',
2: 'right_hip',
3: 'left_hip',
4: 'left_knee',
5: 'left_ankle',
6: 'pelvis',
7: 'thorax',
8: 'upper_neck',
9: 'head_top',
10: 'right_wrist',
11: 'right_elbow',
12: 'right_shoulder',
13: 'left_shoulder',
14: 'left_elbow',
15: 'left_wrist',
Args:
dataset_dir (str): Root path to the dataset.
image_dir (str): Path to a directory where images are held.
anno_path (str): Relative path to the annotation file.
num_joints (int): Keypoint numbers
trainsize (list):[w, h] Image target size
transform (composed(operators)): A sequence of data transforms.
"""
def __init__(self,
dataset_dir,
image_dir,
anno_path,
num_joints,
transform=[]):
super().__init__(dataset_dir, image_dir, anno_path, num_joints,
transform)
self.dataset_name = 'mpii'
def parse_dataset(self):
with open(self.get_anno()) as anno_file:
anno = json.load(anno_file)
gt_db = []
for a in anno:
image_name = a['image']
im_id = a['image_id'] if 'image_id' in a else int(
os.path.splitext(image_name)[0])
c = np.array(a['center'], dtype=np.float32)
s = np.array([a['scale'], a['scale']], dtype=np.float32)
# Adjust center/scale slightly to avoid cropping limbs
if c[0] != -1:
c[1] = c[1] + 15 * s[1]
s = s * 1.25
c = c - 1
joints = np.zeros(
(self.ann_info['num_joints'], 3), dtype=np.float32)
joints_vis = np.zeros(
(self.ann_info['num_joints'], 3), dtype=np.float32)
if 'gt_joints' in a:
joints_ = np.array(a['gt_joints'])
joints_[:, 0:2] = joints_[:, 0:2] - 1
joints_vis_ = np.array(a['joints_vis'])
assert len(joints_) == self.ann_info[
'num_joints'], 'joint num diff: {} vs {}'.format(
len(joints_), self.ann_info['num_joints'])
joints[:, 0:2] = joints_[:, 0:2]
joints_vis[:, 0] = joints_vis_[:]
joints_vis[:, 1] = joints_vis_[:]
gt_db.append({
'image_file': os.path.join(self.img_prefix, image_name),
'im_id': im_id,
'center': c,
'scale': s,
'gt_joints': joints,
'joints_vis': joints_vis
})
print("number length: {}".format(len(gt_db)))
self.db = gt_db
| PaddleDetection/ppdet/data/source/keypoint_coco.py/0 | {
"file_path": "PaddleDetection/ppdet/data/source/keypoint_coco.py",
"repo_id": "PaddleDetection",
"token_count": 15010
} | 65 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# function:
# operators to process sample,
# eg: decode/resize/crop image
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
from numbers import Number, Integral
import uuid
import random
import math
import numpy as np
import os
import copy
import logging
import cv2
from PIL import Image, ImageDraw, ImageEnhance
import pickle
import threading
MUTEX = threading.Lock()
import paddle
from ppdet.core.workspace import serializable
from ..reader import Compose
from .op_helper import (satisfy_sample_constraint, filter_and_process,
generate_sample_bbox, clip_bbox, data_anchor_sampling,
satisfy_sample_constraint_coverage, crop_image_sampling,
generate_sample_bbox_square, bbox_area_sampling,
is_poly, get_border)
from ppdet.utils.logger import setup_logger
from ppdet.utils.compact import imagedraw_textsize_c
from ppdet.modeling.keypoint_utils import get_affine_transform, affine_transform
logger = setup_logger(__name__)
registered_ops = []
def register_op(cls):
registered_ops.append(cls.__name__)
if not hasattr(BaseOperator, cls.__name__):
setattr(BaseOperator, cls.__name__, cls)
else:
raise KeyError("The {} class has been registered.".format(cls.__name__))
return serializable(cls)
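# Minimal sketch of how register_op is meant to be used; `MyCustomOp` is a
# hypothetical operator, not something defined in this module. The decorator
# attaches the class to BaseOperator and wraps it with serializable() so it can
# be instantiated from the YAML config system; registering two classes with the
# same name raises KeyError.
#
#   @register_op
#   class MyCustomOp(BaseOperator):
#       def apply(self, sample, context=None):
#           return sample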
class BboxError(ValueError):
pass
class ImageError(ValueError):
pass
class BaseOperator(object):
def __init__(self, name=None):
if name is None:
name = self.__class__.__name__
self._id = name + '_' + str(uuid.uuid4())[-6:]
def apply(self, sample, context=None):
""" Process a sample.
Args:
sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}
context (dict): info about this sample processing
Returns:
result (dict): a processed sample
"""
return sample
def __call__(self, sample, context=None):
""" Process a sample.
Args:
sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}
context (dict): info about this sample processing
Returns:
result (dict): a processed sample
"""
if isinstance(sample, Sequence):
for i in range(len(sample)):
sample[i] = self.apply(sample[i], context)
else:
sample = self.apply(sample, context)
return sample
def __str__(self):
return str(self._id)
@register_op
class Decode(BaseOperator):
def __init__(self):
""" Transform the image data to numpy format following the rgb format
"""
super(Decode, self).__init__()
def apply(self, sample, context=None):
""" load image if 'im_file' field is not empty but 'image' is"""
if 'image' not in sample:
with open(sample['im_file'], 'rb') as f:
sample['image'] = f.read()
sample.pop('im_file')
try:
im = sample['image']
data = np.frombuffer(im, dtype='uint8')
im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
if 'keep_ori_im' in sample and sample['keep_ori_im']:
sample['ori_image'] = im
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
except:
im = sample['image']
sample['image'] = im
if 'h' not in sample:
sample['h'] = im.shape[0]
elif sample['h'] != im.shape[0]:
logger.warning(
"The actual image height: {} is not equal to the "
"height: {} in annotation, and update sample['h'] by actual "
"image height.".format(im.shape[0], sample['h']))
sample['h'] = im.shape[0]
if 'w' not in sample:
sample['w'] = im.shape[1]
elif sample['w'] != im.shape[1]:
logger.warning(
"The actual image width: {} is not equal to the "
"width: {} in annotation, and update sample['w'] by actual "
"image width.".format(im.shape[1], sample['w']))
sample['w'] = im.shape[1]
sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return sample
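# Minimal usage sketch for Decode ('demo.jpg' is a placeholder path, not a file
# shipped with this repo): the raw bytes under 'im_file' are decoded into an
# RGB ndarray and the shape bookkeeping fields are filled in.
#
#   decode = Decode()
#   sample = decode({'im_file': 'demo.jpg'})
#   # sample now holds 'image' (H, W, 3), 'h', 'w', 'im_shape' and 'scale_factor'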
def _make_dirs(dirname):
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
Path(dirname).mkdir(exist_ok=True)
@register_op
class DecodeCache(BaseOperator):
def __init__(self, cache_root=None):
        '''Decode the image and cache the decoded result on disk
'''
super(DecodeCache, self).__init__()
self.use_cache = False if cache_root is None else True
self.cache_root = cache_root
if cache_root is not None:
_make_dirs(cache_root)
def apply(self, sample, context=None):
if self.use_cache and os.path.exists(
self.cache_path(self.cache_root, sample['im_file'])):
path = self.cache_path(self.cache_root, sample['im_file'])
im = self.load(path)
else:
if 'image' not in sample:
with open(sample['im_file'], 'rb') as f:
sample['image'] = f.read()
im = sample['image']
data = np.frombuffer(im, dtype='uint8')
im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
if 'keep_ori_im' in sample and sample['keep_ori_im']:
sample['ori_image'] = im
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if self.use_cache and not os.path.exists(
self.cache_path(self.cache_root, sample['im_file'])):
path = self.cache_path(self.cache_root, sample['im_file'])
self.dump(im, path)
sample['image'] = im
sample['h'] = im.shape[0]
sample['w'] = im.shape[1]
sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
sample.pop('im_file')
return sample
@staticmethod
    def cache_path(dir_root, im_file):
        return os.path.join(dir_root, os.path.basename(im_file) + '.pkl')
@staticmethod
def load(path):
with open(path, 'rb') as f:
im = pickle.load(f)
return im
@staticmethod
def dump(obj, path):
MUTEX.acquire()
try:
with open(path, 'wb') as f:
pickle.dump(obj, f)
except Exception as e:
logger.warning('dump {} occurs exception {}'.format(path, str(e)))
finally:
MUTEX.release()
@register_op
class SniperDecodeCrop(BaseOperator):
def __init__(self):
super(SniperDecodeCrop, self).__init__()
def __call__(self, sample, context=None):
if 'image' not in sample:
with open(sample['im_file'], 'rb') as f:
sample['image'] = f.read()
sample.pop('im_file')
im = sample['image']
data = np.frombuffer(im, dtype='uint8')
im = cv2.imdecode(data, cv2.IMREAD_COLOR) # BGR mode, but need RGB mode
if 'keep_ori_im' in sample and sample['keep_ori_im']:
sample['ori_image'] = im
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
chip = sample['chip']
x1, y1, x2, y2 = [int(xi) for xi in chip]
im = im[max(y1, 0):min(y2, im.shape[0]), max(x1, 0):min(x2, im.shape[
1]), :]
sample['image'] = im
h = im.shape[0]
w = im.shape[1]
# sample['im_info'] = [h, w, 1.0]
sample['h'] = h
sample['w'] = w
sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return sample
@register_op
class Permute(BaseOperator):
def __init__(self):
"""
Change the channel to be (C, H, W)
"""
super(Permute, self).__init__()
def apply(self, sample, context=None):
im = sample['image']
im = im.transpose((2, 0, 1))
sample['image'] = im
if 'pre_image' in sample:
pre_im = sample['pre_image']
pre_im = pre_im.transpose((2, 0, 1))
sample['pre_image'] = pre_im
return sample
@register_op
class Lighting(BaseOperator):
"""
Lighting the image by eigenvalues and eigenvectors
Args:
eigval (list): eigenvalues
eigvec (list): eigenvectors
alphastd (float): random weight of lighting, 0.1 by default
"""
def __init__(self, eigval, eigvec, alphastd=0.1):
super(Lighting, self).__init__()
self.alphastd = alphastd
self.eigval = np.array(eigval).astype('float32')
self.eigvec = np.array(eigvec).astype('float32')
def apply(self, sample, context=None):
alpha = np.random.normal(scale=self.alphastd, size=(3, ))
sample['image'] += np.dot(self.eigvec, self.eigval * alpha)
if 'pre_image' in sample:
sample['pre_image'] += np.dot(self.eigvec, self.eigval * alpha)
return sample
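# Minimal usage sketch for Lighting; the per-channel shift is
# eigvec @ (eigval * alpha) with alpha ~ N(0, alphastd), i.e. AlexNet-style PCA
# color jitter. The eigval/eigvec numbers below are the commonly used ImageNet
# statistics and are only an illustration, not values shipped with this file.
#
#   lighting = Lighting(eigval=[0.2175, 0.0188, 0.0045],
#                       eigvec=[[-0.5675, 0.7192, 0.4009],
#                               [-0.5808, -0.0045, -0.8140],
#                               [-0.5836, -0.6948, 0.4203]],
#                       alphastd=0.1)
#   sample = lighting(sample)   # expects a float HWC image in sample['image']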
@register_op
class RandomErasingImage(BaseOperator):
def __init__(self, prob=0.5, lower=0.02, higher=0.4, aspect_ratio=0.3):
"""
Random Erasing Data Augmentation, see https://arxiv.org/abs/1708.04896
Args:
prob (float): probability to carry out random erasing
lower (float): lower limit of the erasing area ratio
higher (float): upper limit of the erasing area ratio
aspect_ratio (float): aspect ratio of the erasing region
"""
super(RandomErasingImage, self).__init__()
self.prob = prob
self.lower = lower
self.higher = higher
self.aspect_ratio = aspect_ratio
def apply(self, sample, context=None):
gt_bbox = sample['gt_bbox']
im = sample['image']
if not isinstance(im, np.ndarray):
raise TypeError("{}: image is not a numpy array.".format(self))
if len(im.shape) != 3:
raise ImageError("{}: image is not 3-dimensional.".format(self))
for idx in range(gt_bbox.shape[0]):
if self.prob <= np.random.rand():
continue
x1, y1, x2, y2 = gt_bbox[idx, :]
w_bbox = x2 - x1
h_bbox = y2 - y1
area = w_bbox * h_bbox
target_area = random.uniform(self.lower, self.higher) * area
aspect_ratio = random.uniform(self.aspect_ratio,
1 / self.aspect_ratio)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < w_bbox and h < h_bbox:
off_y1 = random.randint(0, int(h_bbox - h))
off_x1 = random.randint(0, int(w_bbox - w))
im[int(y1 + off_y1):int(y1 + off_y1 + h), int(x1 + off_x1):int(
x1 + off_x1 + w), :] = 0
sample['image'] = im
return sample
@register_op
class NormalizeImage(BaseOperator):
def __init__(self,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
is_scale=True,
norm_type='mean_std'):
"""
Args:
mean (list): the pixel mean
std (list): the pixel variance
is_scale (bool): scale the pixel to [0,1]
norm_type (str): type in ['mean_std', 'none']
"""
super(NormalizeImage, self).__init__()
self.mean = mean
self.std = std
self.is_scale = is_scale
self.norm_type = norm_type
if not (isinstance(self.mean, list) and isinstance(self.std, list) and
isinstance(self.is_scale, bool) and
self.norm_type in ['mean_std', 'none']):
raise TypeError("{}: input type is invalid.".format(self))
from functools import reduce
if reduce(lambda x, y: x * y, self.std) == 0:
raise ValueError('{}: std is invalid!'.format(self))
def apply(self, sample, context=None):
"""Normalize the image.
Operators:
1.(optional) Scale the pixel to [0,1]
2.(optional) Each pixel minus mean and is divided by std
"""
im = sample['image']
im = im.astype(np.float32, copy=False)
if self.is_scale:
scale = 1.0 / 255.0
im *= scale
if self.norm_type == 'mean_std':
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
im -= mean
im /= std
sample['image'] = im
if 'pre_image' in sample:
pre_im = sample['pre_image']
pre_im = pre_im.astype(np.float32, copy=False)
if self.is_scale:
scale = 1.0 / 255.0
pre_im *= scale
if self.norm_type == 'mean_std':
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
pre_im -= mean
pre_im /= std
sample['pre_image'] = pre_im
return sample
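# Minimal usage sketch for NormalizeImage, assuming sample['image'] already
# holds an HWC RGB image (e.g. produced by Decode above): with is_scale=True
# pixels are first divided by 255, then standardized as (x - mean) / std per
# channel when norm_type='mean_std'.
#
#   normalize = NormalizeImage(mean=[0.485, 0.456, 0.406],
#                              std=[0.229, 0.224, 0.225],
#                              is_scale=True, norm_type='mean_std')
#   sample = normalize(sample)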
@register_op
class GridMask(BaseOperator):
def __init__(self,
use_h=True,
use_w=True,
rotate=1,
offset=False,
ratio=0.5,
mode=1,
prob=0.7,
upper_iter=360000):
"""
GridMask Data Augmentation, see https://arxiv.org/abs/2001.04086
Args:
use_h (bool): whether to mask vertically
            use_w (bool): whether to mask horizontally
rotate (float): angle for the mask to rotate
offset (float): mask offset
ratio (float): mask ratio
mode (int): gridmask mode
prob (float): max probability to carry out gridmask
upper_iter (int): suggested to be equal to global max_iter
"""
super(GridMask, self).__init__()
self.use_h = use_h
self.use_w = use_w
self.rotate = rotate
self.offset = offset
self.ratio = ratio
self.mode = mode
self.prob = prob
self.upper_iter = upper_iter
from .gridmask_utils import Gridmask
self.gridmask_op = Gridmask(
use_h,
use_w,
rotate=rotate,
offset=offset,
ratio=ratio,
mode=mode,
prob=prob,
upper_iter=upper_iter)
def apply(self, sample, context=None):
sample['image'] = self.gridmask_op(sample['image'], sample['curr_iter'])
return sample
@register_op
class RandomDistort(BaseOperator):
"""Random color distortion.
Args:
hue (list): hue settings. in [lower, upper, probability] format.
saturation (list): saturation settings. in [lower, upper, probability] format.
contrast (list): contrast settings. in [lower, upper, probability] format.
brightness (list): brightness settings. in [lower, upper, probability] format.
random_apply (bool): whether to apply in random (yolo) or fixed (SSD) order.
        count (int): the number of distortion functions to apply.
random_channel (bool): whether to swap channels randomly.
prob (float): the probability of enhancing the sample.
"""
def __init__(self,
hue=[-18, 18, 0.5],
saturation=[0.5, 1.5, 0.5],
contrast=[0.5, 1.5, 0.5],
brightness=[0.5, 1.5, 0.5],
random_apply=True,
count=4,
random_channel=False,
prob=1.0):
super(RandomDistort, self).__init__()
self.hue = hue
self.saturation = saturation
self.contrast = contrast
self.brightness = brightness
self.random_apply = random_apply
self.count = count
self.random_channel = random_channel
self.prob = prob
def apply_hue(self, img):
low, high, prob = self.hue
if np.random.uniform(0., 1.) < prob:
return img
delta = np.random.uniform(low, high)
img = np.array(img.convert('HSV'))
img[:, :, 0] = img[:, :, 0] + delta
img = Image.fromarray(img, mode='HSV').convert('RGB')
return img
def apply_saturation(self, img):
low, high, prob = self.saturation
if np.random.uniform(0., 1.) < prob:
return img
delta = np.random.uniform(low, high)
img = ImageEnhance.Color(img).enhance(delta)
return img
def apply_contrast(self, img):
low, high, prob = self.contrast
if np.random.uniform(0., 1.) < prob:
return img
delta = np.random.uniform(low, high)
img = ImageEnhance.Contrast(img).enhance(delta)
return img
def apply_brightness(self, img):
low, high, prob = self.brightness
if np.random.uniform(0., 1.) < prob:
return img
delta = np.random.uniform(low, high)
img = ImageEnhance.Brightness(img).enhance(delta)
return img
def apply(self, sample, context=None):
if random.random() > self.prob:
return sample
img = sample['image']
img = Image.fromarray(img.astype(np.uint8))
if self.random_apply:
functions = [
self.apply_brightness, self.apply_contrast,
self.apply_saturation, self.apply_hue
]
distortions = np.random.permutation(functions)[:self.count]
for func in distortions:
img = func(img)
img = np.asarray(img).astype(np.float32)
sample['image'] = img
return sample
img = self.apply_brightness(img)
mode = np.random.randint(0, 2)
if mode:
img = self.apply_contrast(img)
img = self.apply_saturation(img)
img = self.apply_hue(img)
if not mode:
img = self.apply_contrast(img)
img = np.asarray(img).astype(np.float32)
if self.random_channel:
if np.random.randint(0, 2):
img = img[..., np.random.permutation(3)]
sample['image'] = img
return sample
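# Minimal usage sketch for RandomDistort; each setting is in
# [lower, upper, probability] format, so with the defaults each distortion is
# skipped with probability 0.5 and otherwise uses a factor drawn uniformly from
# [lower, upper] (hue uses an additive delta in [-18, 18]).
#
#   distort = RandomDistort(hue=[-18, 18, 0.5], saturation=[0.5, 1.5, 0.5],
#                           contrast=[0.5, 1.5, 0.5], brightness=[0.5, 1.5, 0.5])
#   sample = distort(sample)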
@register_op
class PhotoMetricDistortion(BaseOperator):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
8. randomly swap channels
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
"""
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
super(PhotoMetricDistortion, self).__init__()
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def apply(self, results, context=None):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
img = results['image']
img = img.astype(np.float32)
# random brightness
if np.random.randint(2):
delta = np.random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = np.random.randint(2)
if mode == 1:
if np.random.randint(2):
alpha = np.random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# convert color from BGR to HSV
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# random saturation
if np.random.randint(2):
img[..., 1] *= np.random.uniform(self.saturation_lower,
self.saturation_upper)
# random hue
if np.random.randint(2):
img[..., 0] += np.random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
# random contrast
if mode == 0:
if np.random.randint(2):
alpha = np.random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# randomly swap channels
if np.random.randint(2):
img = img[..., np.random.permutation(3)]
results['image'] = img
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
repr_str += 'contrast_range='
repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
repr_str += 'saturation_range='
repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
repr_str += f'hue_delta={self.hue_delta})'
return repr_str
@register_op
class AutoAugment(BaseOperator):
def __init__(self, autoaug_type="v1"):
"""
Args:
autoaug_type (str): autoaug type, support v0, v1, v2, v3, test
"""
super(AutoAugment, self).__init__()
self.autoaug_type = autoaug_type
def apply(self, sample, context=None):
"""
Learning Data Augmentation Strategies for Object Detection, see https://arxiv.org/abs/1906.11172
"""
im = sample['image']
gt_bbox = sample['gt_bbox']
if not isinstance(im, np.ndarray):
raise TypeError("{}: image is not a numpy array.".format(self))
if len(im.shape) != 3:
raise ImageError("{}: image is not 3-dimensional.".format(self))
if len(gt_bbox) == 0:
return sample
height, width, _ = im.shape
norm_gt_bbox = np.ones_like(gt_bbox, dtype=np.float32)
norm_gt_bbox[:, 0] = gt_bbox[:, 1] / float(height)
norm_gt_bbox[:, 1] = gt_bbox[:, 0] / float(width)
norm_gt_bbox[:, 2] = gt_bbox[:, 3] / float(height)
norm_gt_bbox[:, 3] = gt_bbox[:, 2] / float(width)
from .autoaugment_utils import distort_image_with_autoaugment
im, norm_gt_bbox = distort_image_with_autoaugment(im, norm_gt_bbox,
self.autoaug_type)
gt_bbox[:, 0] = norm_gt_bbox[:, 1] * float(width)
gt_bbox[:, 1] = norm_gt_bbox[:, 0] * float(height)
gt_bbox[:, 2] = norm_gt_bbox[:, 3] * float(width)
gt_bbox[:, 3] = norm_gt_bbox[:, 2] * float(height)
sample['image'] = im
sample['gt_bbox'] = gt_bbox
return sample
@register_op
class RandomFlip(BaseOperator):
def __init__(self, prob=0.5):
"""
Args:
prob (float): the probability of flipping image
"""
super(RandomFlip, self).__init__()
self.prob = prob
if not (isinstance(self.prob, float)):
raise TypeError("{}: input type is invalid.".format(self))
def apply_segm(self, segms, height, width):
def _flip_poly(poly, width):
flipped_poly = np.array(poly)
flipped_poly[0::2] = width - np.array(poly[0::2])
return flipped_poly.tolist()
def _flip_rle(rle, height, width):
if 'counts' in rle and type(rle['counts']) == list:
rle = mask_util.frPyObjects(rle, height, width)
mask = mask_util.decode(rle)
mask = mask[:, ::-1]
rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
return rle
flipped_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
flipped_segms.append([_flip_poly(poly, width) for poly in segm])
else:
# RLE format
import pycocotools.mask as mask_util
flipped_segms.append(_flip_rle(segm, height, width))
return flipped_segms
def apply_keypoint(self, gt_keypoint, width):
for i in range(gt_keypoint.shape[1]):
if i % 2 == 0:
old_x = gt_keypoint[:, i].copy()
gt_keypoint[:, i] = width - old_x
return gt_keypoint
def apply_image(self, image):
return image[:, ::-1, :]
def apply_bbox(self, bbox, width):
oldx1 = bbox[:, 0].copy()
oldx2 = bbox[:, 2].copy()
bbox[:, 0] = width - oldx2
bbox[:, 2] = width - oldx1
return bbox
def apply(self, sample, context=None):
"""Filp the image and bounding box.
Operators:
1. Flip the image numpy.
2. Transform the bboxes' x coordinates.
(Must judge whether the coordinates are normalized!)
3. Transform the segmentations' x coordinates.
(Must judge whether the coordinates are normalized!)
Output:
sample: the image, bounding box and segmentation part
in sample are flipped.
"""
if np.random.uniform(0, 1) < self.prob:
im = sample['image']
height, width = im.shape[:2]
im = self.apply_image(im)
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], width)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], height,
width)
if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:
sample['gt_keypoint'] = self.apply_keypoint(
sample['gt_keypoint'], width)
if 'semantic' in sample and sample['semantic']:
sample['semantic'] = sample['semantic'][:, ::-1]
if 'gt_segm' in sample and sample['gt_segm'].any():
sample['gt_segm'] = sample['gt_segm'][:, :, ::-1]
sample['flipped'] = True
sample['image'] = im
return sample
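# Minimal usage sketch for RandomFlip; with the default prob=0.5 roughly half
# the samples are mirrored horizontally, and any gt_bbox / gt_poly /
# gt_keypoint / gt_segm / semantic fields present are flipped consistently
# (box x-coordinates become [width - x2, width - x1]).
#
#   flip = RandomFlip(prob=0.5)
#   sample = flip(sample)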
@register_op
class Resize(BaseOperator):
def __init__(self, target_size, keep_ratio, interp=cv2.INTER_LINEAR):
"""
        Resize the image to the target size. If keep_ratio is True,
        scale the image so its short side matches min(target_size) while
        keeping the long side within max(target_size);
        if keep_ratio is False, resize the image to target_size (h, w).
Args:
target_size (int|list): image target size
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): the interpolation method
"""
super(Resize, self).__init__()
self.keep_ratio = keep_ratio
self.interp = interp
if not isinstance(target_size, (Integral, Sequence)):
raise TypeError(
"Type of target_size is invalid. Must be Integer or List or Tuple, now is {}".
format(type(target_size)))
if isinstance(target_size, Integral):
target_size = [target_size, target_size]
self.target_size = target_size
def apply_image(self, image, scale):
im_scale_x, im_scale_y = scale
return cv2.resize(
image,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
def apply_bbox(self, bbox, scale, size):
im_scale_x, im_scale_y = scale
resize_w, resize_h = size
bbox[:, 0::2] *= im_scale_x
bbox[:, 1::2] *= im_scale_y
bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, resize_w)
bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, resize_h)
return bbox
def apply_area(self, area, scale):
im_scale_x, im_scale_y = scale
return area * im_scale_x * im_scale_y
def apply_joints(self, joints, scale, size):
im_scale_x, im_scale_y = scale
resize_w, resize_h = size
joints[..., 0] *= im_scale_x
joints[..., 1] *= im_scale_y
joints[..., 0] = np.clip(joints[..., 0], 0, resize_w)
joints[..., 1] = np.clip(joints[..., 1], 0, resize_h)
return joints
def apply_segm(self, segms, im_size, scale):
def _resize_poly(poly, im_scale_x, im_scale_y):
resized_poly = np.array(poly).astype('float32')
resized_poly[0::2] *= im_scale_x
resized_poly[1::2] *= im_scale_y
return resized_poly.tolist()
def _resize_rle(rle, im_h, im_w, im_scale_x, im_scale_y):
if 'counts' in rle and type(rle['counts']) == list:
rle = mask_util.frPyObjects(rle, im_h, im_w)
mask = mask_util.decode(rle)
mask = cv2.resize(
mask,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
return rle
im_h, im_w = im_size
im_scale_x, im_scale_y = scale
resized_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
resized_segms.append([
_resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
])
else:
# RLE format
import pycocotools.mask as mask_util
resized_segms.append(
_resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))
return resized_segms
def apply(self, sample, context=None):
""" Resize the image numpy.
"""
im = sample['image']
if not isinstance(im, np.ndarray):
raise TypeError("{}: image type is not numpy.".format(self))
# apply image
if len(im.shape) == 3:
im_shape = im.shape
else:
im_shape = im[0].shape
if self.keep_ratio:
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
target_size_min = np.min(self.target_size)
target_size_max = np.max(self.target_size)
im_scale = min(target_size_min / im_size_min,
target_size_max / im_size_max)
resize_h = int(im_scale * float(im_shape[0]) + 0.5)
resize_w = int(im_scale * float(im_shape[1]) + 0.5)
else:
resize_h, resize_w = self.target_size
im_scale_y = resize_h / im_shape[0]
im_scale_x = resize_w / im_shape[1]
if len(im.shape) == 3:
im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])
sample['image'] = im.astype(np.float32)
else:
resized_images = []
for one_im in im:
applied_im = self.apply_image(one_im, [im_scale_x, im_scale_y])
resized_images.append(applied_im)
sample['image'] = np.array(resized_images)
# 2d keypoints resize
if 'kps2d' in sample.keys():
kps2d = sample['kps2d']
kps2d[:, :, 0] = kps2d[:, :, 0] * im_scale_x
kps2d[:, :, 1] = kps2d[:, :, 1] * im_scale_y
sample['kps2d'] = kps2d
sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
if 'scale_factor' in sample:
scale_factor = sample['scale_factor']
sample['scale_factor'] = np.asarray(
[scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
dtype=np.float32)
else:
sample['scale_factor'] = np.asarray(
[im_scale_y, im_scale_x], dtype=np.float32)
# apply bbox
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'],
[im_scale_x, im_scale_y],
[resize_w, resize_h])
# apply areas
if 'gt_areas' in sample:
sample['gt_areas'] = self.apply_area(sample['gt_areas'],
[im_scale_x, im_scale_y])
# apply polygon
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_shape[:2],
[im_scale_x, im_scale_y])
# apply semantic
if 'semantic' in sample and sample['semantic']:
semantic = sample['semantic']
semantic = cv2.resize(
semantic.astype('float32'),
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
semantic = np.asarray(semantic).astype('int32')
semantic = np.expand_dims(semantic, 0)
sample['semantic'] = semantic
# apply gt_segm
if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
masks = [
cv2.resize(
gt_segm,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=cv2.INTER_NEAREST)
for gt_segm in sample['gt_segm']
]
sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
if 'gt_joints' in sample:
sample['gt_joints'] = self.apply_joints(sample['gt_joints'],
[im_scale_x, im_scale_y],
[resize_w, resize_h])
return sample
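# Minimal usage sketch for Resize with keep_ratio=True: the scale is
# min(min(target_size) / min(h, w), max(target_size) / max(h, w)), so a
# 600 x 1000 image with target_size=[800, 1333] is scaled by
# min(800/600, 1333/1000) = 1.333; boxes, polygons, masks and joints present in
# the sample are rescaled with the image.
#
#   resize = Resize(target_size=[800, 1333], keep_ratio=True)
#   sample = resize(sample)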
@register_op
class MultiscaleTestResize(BaseOperator):
def __init__(self,
origin_target_size=[800, 1333],
target_size=[],
interp=cv2.INTER_LINEAR,
use_flip=True):
"""
        Rescale the image to origin_target_size and to each size in target_size for multi-scale testing.
Args:
origin_target_size (list): origin target size of image
target_size (list): A list of target sizes of image.
interp (int): the interpolation method.
use_flip (bool): whether use flip augmentation.
"""
super(MultiscaleTestResize, self).__init__()
self.interp = interp
self.use_flip = use_flip
if not isinstance(target_size, Sequence):
raise TypeError(
"Type of target_size is invalid. Must be List or Tuple, now is {}".
format(type(target_size)))
self.target_size = target_size
if not isinstance(origin_target_size, Sequence):
raise TypeError(
"Type of origin_target_size is invalid. Must be List or Tuple, now is {}".
format(type(origin_target_size)))
self.origin_target_size = origin_target_size
def apply(self, sample, context=None):
""" Resize the image numpy for multi-scale test.
"""
samples = []
resizer = Resize(
self.origin_target_size, keep_ratio=True, interp=self.interp)
samples.append(resizer(sample.copy(), context))
if self.use_flip:
flipper = RandomFlip(1.1)
samples.append(flipper(sample.copy(), context=context))
for size in self.target_size:
resizer = Resize(size, keep_ratio=True, interp=self.interp)
samples.append(resizer(sample.copy(), context))
return samples
@register_op
class RandomResize(BaseOperator):
def __init__(self,
target_size,
keep_ratio=True,
interp=cv2.INTER_LINEAR,
random_range=False,
random_size=True,
random_interp=False):
"""
        Resize the image to a randomly selected target size and, optionally, a random interpolation method.
        Args:
            target_size (int, list, tuple): image target size; if random_size is True, it must be a list or tuple
            keep_ratio (bool): whether to keep the aspect ratio or not, default True
            interp (int): the interpolation method
            random_range (bool): whether to randomly sample the target size of the image; target_size must then be
                [[min_short_edge, long_edge], [max_short_edge, long_edge]]
            random_size (bool): whether to randomly select the target size of the image
            random_interp (bool): whether to randomly select the interpolation method
"""
super(RandomResize, self).__init__()
self.keep_ratio = keep_ratio
self.interp = interp
self.interps = [
cv2.INTER_NEAREST,
cv2.INTER_LINEAR,
cv2.INTER_AREA,
cv2.INTER_CUBIC,
cv2.INTER_LANCZOS4,
]
assert isinstance(target_size, (
Integral, Sequence)), "target_size must be Integer, List or Tuple"
if (random_range or random_size) and not isinstance(target_size,
Sequence):
raise TypeError(
"Type of target_size is invalid when random_size or random_range is True. Must be List or Tuple, now is {}".
format(type(target_size)))
if random_range and not len(target_size) == 2:
raise TypeError(
"target_size must be two list as [[min_short_edge, long_edge], [max_short_edge, long_edge]] when random_range is True."
)
self.target_size = target_size
self.random_range = random_range
self.random_size = random_size
self.random_interp = random_interp
def apply(self, sample, context=None):
""" Resize the image numpy.
"""
if self.random_range:
short_edge = np.random.randint(self.target_size[0][0],
self.target_size[1][0] + 1)
long_edge = max(self.target_size[0][1], self.target_size[1][1] + 1)
target_size = [short_edge, long_edge]
else:
if self.random_size:
target_size = random.choice(self.target_size)
else:
target_size = self.target_size
if self.random_interp:
interp = random.choice(self.interps)
else:
interp = self.interp
resizer = Resize(target_size, self.keep_ratio, interp)
return resizer(sample, context=context)
@register_op
class RandomExpand(BaseOperator):
"""Random expand the canvas.
Args:
ratio (float): maximum expansion ratio.
prob (float): probability to expand.
fill_value (list): color value used to fill the canvas. in RGB order.
"""
def __init__(self, ratio=4., prob=0.5, fill_value=(127.5, 127.5, 127.5)):
super(RandomExpand, self).__init__()
assert ratio > 1.01, "expand ratio must be larger than 1.01"
self.ratio = ratio
self.prob = prob
assert isinstance(fill_value, (Number, Sequence)), \
"fill value must be either float or sequence"
if isinstance(fill_value, Number):
fill_value = (fill_value, ) * 3
if not isinstance(fill_value, tuple):
fill_value = tuple(fill_value)
self.fill_value = fill_value
def apply(self, sample, context=None):
if np.random.uniform(0., 1.) < self.prob:
return sample
im = sample['image']
height, width = im.shape[:2]
ratio = np.random.uniform(1., self.ratio)
h = int(height * ratio)
w = int(width * ratio)
if not h > height or not w > width:
return sample
y = np.random.randint(0, h - height)
x = np.random.randint(0, w - width)
offsets, size = [x, y], [h, w]
pad = Pad(size,
pad_mode=-1,
offsets=offsets,
fill_value=self.fill_value)
return pad(sample, context=context)
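# Minimal usage sketch for RandomExpand; when the expansion is applied (roughly
# half the time with the default prob=0.5), the canvas grows by a random factor
# in (1, ratio], the image is pasted at a random offset and the remaining area
# is filled with fill_value via the Pad operator.
#
#   expand = RandomExpand(ratio=4., prob=0.5, fill_value=(127.5, 127.5, 127.5))
#   sample = expand(sample)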
@register_op
class CropWithSampling(BaseOperator):
def __init__(self, batch_sampler, satisfy_all=False, avoid_no_bbox=True):
"""
Args:
batch_sampler (list): Multiple sets of different
parameters for cropping.
            satisfy_all (bool): whether all ground-truth boxes must satisfy the sampling constraint.
e.g.[[1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0],
[1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0],
[1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 1.0],
[1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0],
[1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 1.0],
[1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 1.0],
[1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0]]
[max sample, max trial, min scale, max scale,
min aspect ratio, max aspect ratio,
min overlap, max overlap]
avoid_no_bbox (bool): whether to avoid the
situation where the box does not appear.
"""
super(CropWithSampling, self).__init__()
self.batch_sampler = batch_sampler
self.satisfy_all = satisfy_all
self.avoid_no_bbox = avoid_no_bbox
def apply(self, sample, context):
"""
Crop the image and modify bounding box.
Operators:
1. Scale the image width and height.
            2. Crop the image according to a random sample.
3. Rescale the bounding box.
4. Determine if the new bbox is satisfied in the new image.
Returns:
sample: the image, bounding box are replaced.
"""
assert 'image' in sample, "image data not found"
im = sample['image']
gt_bbox = sample['gt_bbox']
gt_class = sample['gt_class']
im_height, im_width = im.shape[:2]
gt_score = None
if 'gt_score' in sample:
gt_score = sample['gt_score']
sampled_bbox = []
gt_bbox = gt_bbox.tolist()
for sampler in self.batch_sampler:
found = 0
for i in range(sampler[1]):
if found >= sampler[0]:
break
sample_bbox = generate_sample_bbox(sampler)
if satisfy_sample_constraint(sampler, sample_bbox, gt_bbox,
self.satisfy_all):
sampled_bbox.append(sample_bbox)
found = found + 1
im = np.array(im)
while sampled_bbox:
idx = int(np.random.uniform(0, len(sampled_bbox)))
sample_bbox = sampled_bbox.pop(idx)
sample_bbox = clip_bbox(sample_bbox)
crop_bbox, crop_class, crop_score = \
filter_and_process(sample_bbox, gt_bbox, gt_class, scores=gt_score)
if self.avoid_no_bbox:
if len(crop_bbox) < 1:
continue
xmin = int(sample_bbox[0] * im_width)
xmax = int(sample_bbox[2] * im_width)
ymin = int(sample_bbox[1] * im_height)
ymax = int(sample_bbox[3] * im_height)
im = im[ymin:ymax, xmin:xmax]
sample['image'] = im
sample['gt_bbox'] = crop_bbox
sample['gt_class'] = crop_class
sample['gt_score'] = crop_score
return sample
return sample
@register_op
class CropWithDataAchorSampling(BaseOperator):
def __init__(self,
batch_sampler,
anchor_sampler=None,
target_size=None,
das_anchor_scales=[16, 32, 64, 128],
sampling_prob=0.5,
min_size=8.,
avoid_no_bbox=True):
"""
Args:
anchor_sampler (list): anchor_sampling sets of different
parameters for cropping.
batch_sampler (list): Multiple sets of different
parameters for cropping.
e.g.[[1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2, 0.0]]
[[1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0]]
[max sample, max trial, min scale, max scale,
min aspect ratio, max aspect ratio,
min overlap, max overlap, min coverage, max coverage]
target_size (int): target image size.
das_anchor_scales (list[float]): a list of anchor scales in data
                anchor sampling.
min_size (float): minimum size of sampled bbox.
avoid_no_bbox (bool): whether to avoid the
situation where the box does not appear.
"""
super(CropWithDataAchorSampling, self).__init__()
self.anchor_sampler = anchor_sampler
self.batch_sampler = batch_sampler
self.target_size = target_size
self.sampling_prob = sampling_prob
self.min_size = min_size
self.avoid_no_bbox = avoid_no_bbox
self.das_anchor_scales = np.array(das_anchor_scales)
def apply(self, sample, context):
"""
Crop the image and modify bounding box.
Operators:
1. Scale the image width and height.
            2. Crop the image according to a random sample.
3. Rescale the bounding box.
4. Determine if the new bbox is satisfied in the new image.
Returns:
sample: the image, bounding box are replaced.
"""
assert 'image' in sample, "image data not found"
im = sample['image']
gt_bbox = sample['gt_bbox']
gt_class = sample['gt_class']
image_height, image_width = im.shape[:2]
gt_bbox[:, 0] /= image_width
gt_bbox[:, 1] /= image_height
gt_bbox[:, 2] /= image_width
gt_bbox[:, 3] /= image_height
gt_score = None
if 'gt_score' in sample:
gt_score = sample['gt_score']
sampled_bbox = []
gt_bbox = gt_bbox.tolist()
prob = np.random.uniform(0., 1.)
if prob > self.sampling_prob: # anchor sampling
assert self.anchor_sampler
for sampler in self.anchor_sampler:
found = 0
for i in range(sampler[1]):
if found >= sampler[0]:
break
sample_bbox = data_anchor_sampling(
gt_bbox, image_width, image_height,
self.das_anchor_scales, self.target_size)
if sample_bbox == 0:
break
if satisfy_sample_constraint_coverage(sampler, sample_bbox,
gt_bbox):
sampled_bbox.append(sample_bbox)
found = found + 1
im = np.array(im)
while sampled_bbox:
idx = int(np.random.uniform(0, len(sampled_bbox)))
sample_bbox = sampled_bbox.pop(idx)
if 'gt_keypoint' in sample.keys():
keypoints = (sample['gt_keypoint'],
sample['keypoint_ignore'])
crop_bbox, crop_class, crop_score, gt_keypoints = \
filter_and_process(sample_bbox, gt_bbox, gt_class,
scores=gt_score,
keypoints=keypoints)
else:
crop_bbox, crop_class, crop_score = filter_and_process(
sample_bbox, gt_bbox, gt_class, scores=gt_score)
crop_bbox, crop_class, crop_score = bbox_area_sampling(
crop_bbox, crop_class, crop_score, self.target_size,
self.min_size)
if self.avoid_no_bbox:
if len(crop_bbox) < 1:
continue
im = crop_image_sampling(im, sample_bbox, image_width,
image_height, self.target_size)
height, width = im.shape[:2]
crop_bbox[:, 0] *= width
crop_bbox[:, 1] *= height
crop_bbox[:, 2] *= width
crop_bbox[:, 3] *= height
sample['image'] = im
sample['gt_bbox'] = crop_bbox
sample['gt_class'] = crop_class
if 'gt_score' in sample:
sample['gt_score'] = crop_score
if 'gt_keypoint' in sample.keys():
sample['gt_keypoint'] = gt_keypoints[0]
sample['keypoint_ignore'] = gt_keypoints[1]
return sample
return sample
else:
for sampler in self.batch_sampler:
found = 0
for i in range(sampler[1]):
if found >= sampler[0]:
break
sample_bbox = generate_sample_bbox_square(
sampler, image_width, image_height)
if satisfy_sample_constraint_coverage(sampler, sample_bbox,
gt_bbox):
sampled_bbox.append(sample_bbox)
found = found + 1
im = np.array(im)
while sampled_bbox:
idx = int(np.random.uniform(0, len(sampled_bbox)))
sample_bbox = sampled_bbox.pop(idx)
sample_bbox = clip_bbox(sample_bbox)
if 'gt_keypoint' in sample.keys():
keypoints = (sample['gt_keypoint'],
sample['keypoint_ignore'])
crop_bbox, crop_class, crop_score, gt_keypoints = \
filter_and_process(sample_bbox, gt_bbox, gt_class,
scores=gt_score,
keypoints=keypoints)
else:
crop_bbox, crop_class, crop_score = filter_and_process(
sample_bbox, gt_bbox, gt_class, scores=gt_score)
# sampling bbox according the bbox area
crop_bbox, crop_class, crop_score = bbox_area_sampling(
crop_bbox, crop_class, crop_score, self.target_size,
self.min_size)
if self.avoid_no_bbox:
if len(crop_bbox) < 1:
continue
xmin = int(sample_bbox[0] * image_width)
xmax = int(sample_bbox[2] * image_width)
ymin = int(sample_bbox[1] * image_height)
ymax = int(sample_bbox[3] * image_height)
im = im[ymin:ymax, xmin:xmax]
height, width = im.shape[:2]
crop_bbox[:, 0] *= width
crop_bbox[:, 1] *= height
crop_bbox[:, 2] *= width
crop_bbox[:, 3] *= height
sample['image'] = im
sample['gt_bbox'] = crop_bbox
sample['gt_class'] = crop_class
if 'gt_score' in sample:
sample['gt_score'] = crop_score
if 'gt_keypoint' in sample.keys():
sample['gt_keypoint'] = gt_keypoints[0]
sample['keypoint_ignore'] = gt_keypoints[1]
return sample
return sample
@register_op
class RandomCrop(BaseOperator):
"""Random crop image and bboxes.
Args:
aspect_ratio (list): aspect ratio of cropped region.
in [min, max] format.
thresholds (list): iou thresholds for decide a valid bbox crop.
scaling (list): ratio between a cropped region and the original image.
in [min, max] format.
num_attempts (int): number of tries before giving up.
allow_no_crop (bool): allow return without actually cropping them.
cover_all_box (bool): ensure all bboxes are covered in the final crop.
        is_mask_crop (bool): whether to crop the segmentation.
        ioumode (str): overlap criterion used to accept a crop, 'iou' or 'iof'.
        prob (float): probability of applying this operator.
"""
def __init__(self,
aspect_ratio=[.5, 2.],
thresholds=[.0, .1, .3, .5, .7, .9],
scaling=[.3, 1.],
num_attempts=50,
allow_no_crop=True,
cover_all_box=False,
is_mask_crop=False,
ioumode="iou",
prob=1.0):
super(RandomCrop, self).__init__()
self.aspect_ratio = aspect_ratio
self.thresholds = thresholds
self.scaling = scaling
self.num_attempts = num_attempts
self.allow_no_crop = allow_no_crop
self.cover_all_box = cover_all_box
self.is_mask_crop = is_mask_crop
self.ioumode = ioumode
self.prob = prob
def crop_segms(self, segms, valid_ids, crop, height, width):
def _crop_poly(segm, crop):
xmin, ymin, xmax, ymax = crop
crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]
crop_p = np.array(crop_coord).reshape(4, 2)
crop_p = Polygon(crop_p)
crop_segm = list()
for poly in segm:
poly = np.array(poly).reshape(len(poly) // 2, 2)
polygon = Polygon(poly)
if not polygon.is_valid:
exterior = polygon.exterior
multi_lines = exterior.intersection(exterior)
polygons = shapely.ops.polygonize(multi_lines)
polygon = MultiPolygon(polygons)
multi_polygon = list()
if isinstance(polygon, MultiPolygon):
multi_polygon = copy.deepcopy(polygon)
else:
multi_polygon.append(copy.deepcopy(polygon))
for per_polygon in multi_polygon:
inter = per_polygon.intersection(crop_p)
if not inter:
continue
if isinstance(inter, (MultiPolygon, GeometryCollection)):
for part in inter:
if not isinstance(part, Polygon):
continue
part = np.squeeze(
np.array(part.exterior.coords[:-1]).reshape(1,
-1))
part[0::2] -= xmin
part[1::2] -= ymin
crop_segm.append(part.tolist())
elif isinstance(inter, Polygon):
crop_poly = np.squeeze(
np.array(inter.exterior.coords[:-1]).reshape(1, -1))
crop_poly[0::2] -= xmin
crop_poly[1::2] -= ymin
crop_segm.append(crop_poly.tolist())
else:
continue
return crop_segm
def _crop_rle(rle, crop, height, width):
if 'counts' in rle and type(rle['counts']) == list:
rle = mask_util.frPyObjects(rle, height, width)
mask = mask_util.decode(rle)
mask = mask[crop[1]:crop[3], crop[0]:crop[2]]
rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
return rle
crop_segms = []
for id in valid_ids:
segm = segms[id]
if is_poly(segm):
import copy
import shapely.ops
from shapely.geometry import Polygon, MultiPolygon, GeometryCollection
logging.getLogger("shapely").setLevel(logging.WARNING)
# Polygon format
crop_segms.append(_crop_poly(segm, crop))
else:
# RLE format
import pycocotools.mask as mask_util
crop_segms.append(_crop_rle(segm, crop, height, width))
return crop_segms
def set_fake_bboxes(self, sample):
sample['gt_bbox'] = np.array(
[
[32, 32, 128, 128],
[32, 32, 128, 256],
[32, 64, 128, 128],
[32, 64, 128, 256],
[64, 64, 128, 256],
[64, 64, 256, 256],
[64, 32, 128, 256],
[64, 32, 128, 256],
[96, 32, 128, 256],
[96, 32, 128, 256],
],
dtype=np.float32)
sample['gt_class'] = np.array(
[[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]], np.int32)
return sample
def apply(self, sample, context=None):
if random.random() > self.prob:
return sample
if 'gt_bbox' not in sample:
# only used in semi-det as unsup data
sample = self.set_fake_bboxes(sample)
sample = self.random_crop(sample, fake_bboxes=True)
del sample['gt_bbox']
del sample['gt_class']
return sample
if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
return sample
sample = self.random_crop(sample)
return sample
def random_crop(self, sample, fake_bboxes=False):
h, w = sample['image'].shape[:2]
gt_bbox = sample['gt_bbox']
# NOTE Original method attempts to generate one candidate for each
# threshold then randomly sample one from the resulting list.
# Here a short circuit approach is taken, i.e., randomly choose a
# threshold and attempt to find a valid crop, and simply return the
# first one found.
# The probability is not exactly the same, kinda resembling the
# "Monty Hall" problem. Actually carrying out the attempts will affect
# observability (just like opening doors in the "Monty Hall" game).
thresholds = list(self.thresholds)
if self.allow_no_crop:
thresholds.append('no_crop')
np.random.shuffle(thresholds)
for thresh in thresholds:
if thresh == 'no_crop':
return sample
found = False
for i in range(self.num_attempts):
scale = np.random.uniform(*self.scaling)
if self.aspect_ratio is not None:
min_ar, max_ar = self.aspect_ratio
aspect_ratio = np.random.uniform(
max(min_ar, scale**2), min(max_ar, scale**-2))
h_scale = scale / np.sqrt(aspect_ratio)
w_scale = scale * np.sqrt(aspect_ratio)
else:
h_scale = np.random.uniform(*self.scaling)
w_scale = np.random.uniform(*self.scaling)
crop_h = h * h_scale
crop_w = w * w_scale
if self.aspect_ratio is None:
if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:
continue
crop_h = int(crop_h)
crop_w = int(crop_w)
crop_y = np.random.randint(0, h - crop_h)
crop_x = np.random.randint(0, w - crop_w)
crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
if self.ioumode == "iof":
iou = self._gtcropiou_matrix(
gt_bbox, np.array(
[crop_box], dtype=np.float32))
elif self.ioumode == "iou":
iou = self._iou_matrix(
gt_bbox, np.array(
[crop_box], dtype=np.float32))
if iou.max() < thresh:
continue
if self.cover_all_box and iou.min() < thresh:
continue
cropped_box, valid_ids = self._crop_box_with_center_constraint(
gt_bbox, np.array(
crop_box, dtype=np.float32))
if valid_ids.size > 0:
found = True
break
if found:
if self.is_mask_crop and 'gt_poly' in sample and len(sample[
'gt_poly']) > 0:
crop_polys = self.crop_segms(
sample['gt_poly'],
valid_ids,
np.array(
crop_box, dtype=np.int64),
h,
w)
if [] in crop_polys:
delete_id = list()
valid_polys = list()
for id, crop_poly in enumerate(crop_polys):
if crop_poly == []:
delete_id.append(id)
else:
valid_polys.append(crop_poly)
valid_ids = np.delete(valid_ids, delete_id)
if len(valid_polys) == 0:
return sample
sample['gt_poly'] = valid_polys
else:
sample['gt_poly'] = crop_polys
if 'gt_segm' in sample:
sample['gt_segm'] = self._crop_segm(sample['gt_segm'],
crop_box)
sample['gt_segm'] = np.take(
sample['gt_segm'], valid_ids, axis=0)
sample['image'] = self._crop_image(sample['image'], crop_box)
if fake_bboxes == True:
return sample
sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
sample['gt_class'] = np.take(
sample['gt_class'], valid_ids, axis=0)
if 'gt_score' in sample:
sample['gt_score'] = np.take(
sample['gt_score'], valid_ids, axis=0)
if 'is_crowd' in sample:
sample['is_crowd'] = np.take(
sample['is_crowd'], valid_ids, axis=0)
if 'difficult' in sample:
sample['difficult'] = np.take(
sample['difficult'], valid_ids, axis=0)
if 'gt_joints' in sample:
sample['gt_joints'] = self._crop_joints(sample['gt_joints'],
crop_box)
return sample
return sample
def _iou_matrix(self, a, b):
tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])
br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
area_o = (area_a[:, np.newaxis] + area_b - area_i)
return area_i / (area_o + 1e-10)
def _gtcropiou_matrix(self, a, b):
tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])
br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
area_o = (area_a[:, np.newaxis] + area_b - area_i)
return area_i / (area_a + 1e-10)
def _crop_box_with_center_constraint(self, box, crop):
cropped_box = box.copy()
cropped_box[:, :2] = np.maximum(box[:, :2], crop[:2])
cropped_box[:, 2:] = np.minimum(box[:, 2:], crop[2:])
cropped_box[:, :2] -= crop[:2]
cropped_box[:, 2:] -= crop[:2]
centers = (box[:, :2] + box[:, 2:]) / 2
valid = np.logical_and(crop[:2] <= centers,
centers < crop[2:]).all(axis=1)
valid = np.logical_and(
valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))
return cropped_box, np.where(valid)[0]
def _crop_image(self, img, crop):
x1, y1, x2, y2 = crop
return img[y1:y2, x1:x2, :]
def _crop_segm(self, segm, crop):
x1, y1, x2, y2 = crop
return segm[:, y1:y2, x1:x2]
def _crop_joints(self, joints, crop):
x1, y1, x2, y2 = crop
joints[joints[..., 0] > x2, :] = 0
joints[joints[..., 1] > y2, :] = 0
joints[joints[..., 0] < x1, :] = 0
joints[joints[..., 1] < y1, :] = 0
joints[..., 0] -= x1
joints[..., 1] -= y1
return joints
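# Minimal usage sketch for RandomCrop: a threshold is drawn at random from
# `thresholds` (or 'no_crop' when allow_no_crop=True), candidate windows are
# sampled up to num_attempts times, and a window is accepted once its IoU
# (or IoF with ioumode='iof') against the ground-truth boxes clears that
# threshold; boxes whose centers fall outside the window are dropped.
#
#   crop = RandomCrop(aspect_ratio=[.5, 2.], scaling=[.3, 1.], num_attempts=50)
#   sample = crop(sample)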
@register_op
class RandomScaledCrop(BaseOperator):
"""Resize image and bbox based on long side (with optional random scaling),
then crop or pad image to target size.
Args:
target_size (int|list): target size, "hw" format.
scale_range (list): random scale range.
interp (int): interpolation method, default to `cv2.INTER_LINEAR`.
fill_value (float|list|tuple): color value used to fill the canvas,
in RGB order.
"""
def __init__(self,
target_size=512,
scale_range=[.1, 2.],
interp=cv2.INTER_LINEAR,
fill_value=(123.675, 116.28, 103.53)):
super(RandomScaledCrop, self).__init__()
assert isinstance(target_size, (
Integral, Sequence)), "target_size must be Integer, List or Tuple"
if isinstance(target_size, Integral):
target_size = [target_size, ] * 2
self.target_size = target_size
self.scale_range = scale_range
self.interp = interp
assert isinstance(fill_value, (Number, Sequence)), \
"fill value must be either float or sequence"
if isinstance(fill_value, Number):
fill_value = (fill_value, ) * 3
if not isinstance(fill_value, tuple):
fill_value = tuple(fill_value)
self.fill_value = fill_value
def apply_image(self, img, output_size, offset_x, offset_y):
th, tw = self.target_size
rh, rw = output_size
img = cv2.resize(
img, (rw, rh), interpolation=self.interp).astype(np.float32)
canvas = np.ones([th, tw, 3], dtype=np.float32)
canvas *= np.array(self.fill_value, dtype=np.float32)
canvas[:min(th, rh), :min(tw, rw)] = \
img[offset_y:offset_y + th, offset_x:offset_x + tw]
return canvas
def apply_bbox(self, gt_bbox, gt_class, scale, offset_x, offset_y):
th, tw = self.target_size
shift_array = np.array(
[
offset_x,
offset_y,
] * 2, dtype=np.float32)
boxes = gt_bbox * scale - shift_array
boxes[:, 0::2] = np.clip(boxes[:, 0::2], 0, tw)
boxes[:, 1::2] = np.clip(boxes[:, 1::2], 0, th)
# filter boxes with no area
area = np.prod(boxes[..., 2:] - boxes[..., :2], axis=1)
valid = (area > 1.).nonzero()[0]
return boxes[valid], gt_class[valid], valid
def apply_segm(self, segms, output_size, offset_x, offset_y, valid=None):
th, tw = self.target_size
rh, rw = output_size
out_segms = []
for segm in segms:
segm = cv2.resize(segm, (rw, rh), interpolation=cv2.INTER_NEAREST)
segm = segm.astype(np.float32)
canvas = np.zeros([th, tw], dtype=segm.dtype)
canvas[:min(th, rh), :min(tw, rw)] = \
segm[offset_y:offset_y + th, offset_x:offset_x + tw]
out_segms.append(canvas)
out_segms = np.stack(out_segms)
return out_segms if valid is None else out_segms[valid]
def apply(self, sample, context=None):
img = sample['image']
h, w = img.shape[:2]
random_scale = np.random.uniform(*self.scale_range)
target_scale_size = [t * random_scale for t in self.target_size]
# Compute actual rescaling applied to image.
scale = min(target_scale_size[0] / h, target_scale_size[1] / w)
output_size = [int(round(h * scale)), int(round(w * scale))]
# get offset
offset_x = int(
max(0, np.random.uniform(0., output_size[1] - self.target_size[1])))
offset_y = int(
max(0, np.random.uniform(0., output_size[0] - self.target_size[0])))
# apply to image
sample['image'] = self.apply_image(img, output_size, offset_x, offset_y)
# apply to bbox
valid = None
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'], sample['gt_class'], valid = self.apply_bbox(
sample['gt_bbox'], sample['gt_class'], scale, offset_x,
offset_y)
# apply to segm
if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
sample['gt_segm'] = self.apply_segm(sample['gt_segm'], output_size,
offset_x, offset_y, valid)
sample['im_shape'] = np.asarray(output_size, dtype=np.float32)
scale_factor = sample['scale_factor']
sample['scale_factor'] = np.asarray(
[scale_factor[0] * scale, scale_factor[1] * scale],
dtype=np.float32)
return sample
@register_op
class Cutmix(BaseOperator):
def __init__(self, alpha=1.5, beta=1.5):
"""
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features, see https://arxiv.org/abs/1905.04899
        Cutmix the image and gt_bbox/gt_score of two samples
        Args:
            alpha (float): alpha parameter of the Beta distribution
            beta (float): beta parameter of the Beta distribution
"""
super(Cutmix, self).__init__()
self.alpha = alpha
self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in {}".format(self))
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in {}".format(self))
def apply_image(self, img1, img2, factor):
""" _rand_bbox """
h = max(img1.shape[0], img2.shape[0])
w = max(img1.shape[1], img2.shape[1])
cut_rat = np.sqrt(1. - factor)
cut_w = np.int32(w * cut_rat)
cut_h = np.int32(h * cut_rat)
# uniform
cx = np.random.randint(w)
cy = np.random.randint(h)
bbx1 = np.clip(cx - cut_w // 2, 0, w - 1)
bby1 = np.clip(cy - cut_h // 2, 0, h - 1)
bbx2 = np.clip(cx + cut_w // 2, 0, w - 1)
bby2 = np.clip(cy + cut_h // 2, 0, h - 1)
img_1_pad = np.zeros((h, w, img1.shape[2]), 'float32')
img_1_pad[:img1.shape[0], :img1.shape[1], :] = \
img1.astype('float32')
img_2_pad = np.zeros((h, w, img2.shape[2]), 'float32')
img_2_pad[:img2.shape[0], :img2.shape[1], :] = \
img2.astype('float32')
img_1_pad[bby1:bby2, bbx1:bbx2, :] = img_2_pad[bby1:bby2, bbx1:bbx2, :]
return img_1_pad
def __call__(self, sample, context=None):
if not isinstance(sample, Sequence):
return sample
assert len(sample) == 2, 'cutmix need two samples'
factor = np.random.beta(self.alpha, self.beta)
factor = max(0.0, min(1.0, factor))
if factor >= 1.0:
return sample[0]
if factor <= 0.0:
return sample[1]
img1 = sample[0]['image']
img2 = sample[1]['image']
img = self.apply_image(img1, img2, factor)
gt_bbox1 = sample[0]['gt_bbox']
gt_bbox2 = sample[1]['gt_bbox']
gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
gt_class1 = sample[0]['gt_class']
gt_class2 = sample[1]['gt_class']
gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
gt_score1 = np.ones_like(sample[0]['gt_class'])
gt_score2 = np.ones_like(sample[1]['gt_class'])
gt_score = np.concatenate(
(gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
result = copy.deepcopy(sample[0])
result['image'] = img
result['gt_bbox'] = gt_bbox
result['gt_score'] = gt_score
result['gt_class'] = gt_class
if 'is_crowd' in sample[0]:
is_crowd1 = sample[0]['is_crowd']
is_crowd2 = sample[1]['is_crowd']
is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
result['is_crowd'] = is_crowd
if 'difficult' in sample[0]:
is_difficult1 = sample[0]['difficult']
is_difficult2 = sample[1]['difficult']
is_difficult = np.concatenate(
(is_difficult1, is_difficult2), axis=0)
result['difficult'] = is_difficult
return result
@register_op
class Mixup(BaseOperator):
def __init__(self, alpha=1.5, beta=1.5):
""" Mixup image and gt_bbbox/gt_score
Args:
alpha (float): alpha parameter of beta distribute
beta (float): beta parameter of beta distribute
"""
super(Mixup, self).__init__()
self.alpha = alpha
self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in {}".format(self))
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in {}".format(self))
def apply_image(self, img1, img2, factor):
h = max(img1.shape[0], img2.shape[0])
w = max(img1.shape[1], img2.shape[1])
img = np.zeros((h, w, img1.shape[2]), 'float32')
img[:img1.shape[0], :img1.shape[1], :] = \
img1.astype('float32') * factor
img[:img2.shape[0], :img2.shape[1], :] += \
img2.astype('float32') * (1.0 - factor)
return img.astype('uint8')
def __call__(self, sample, context=None):
if not isinstance(sample, Sequence):
return sample
assert len(sample) == 2, 'mixup need two samples'
factor = np.random.beta(self.alpha, self.beta)
factor = max(0.0, min(1.0, factor))
if factor >= 1.0:
return sample[0]
if factor <= 0.0:
return sample[1]
im = self.apply_image(sample[0]['image'], sample[1]['image'], factor)
result = copy.deepcopy(sample[0])
result['image'] = im
# apply bbox and score
if 'gt_bbox' in sample[0]:
gt_bbox1 = sample[0]['gt_bbox']
gt_bbox2 = sample[1]['gt_bbox']
gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
result['gt_bbox'] = gt_bbox
if 'gt_class' in sample[0]:
gt_class1 = sample[0]['gt_class']
gt_class2 = sample[1]['gt_class']
gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
result['gt_class'] = gt_class
gt_score1 = np.ones_like(sample[0]['gt_class'])
gt_score2 = np.ones_like(sample[1]['gt_class'])
gt_score = np.concatenate(
(gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
result['gt_score'] = gt_score.astype('float32')
if 'is_crowd' in sample[0]:
is_crowd1 = sample[0]['is_crowd']
is_crowd2 = sample[1]['is_crowd']
is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
result['is_crowd'] = is_crowd
if 'difficult' in sample[0]:
is_difficult1 = sample[0]['difficult']
is_difficult2 = sample[1]['difficult']
is_difficult = np.concatenate(
(is_difficult1, is_difficult2), axis=0)
result['difficult'] = is_difficult
if 'gt_ide' in sample[0]:
gt_ide1 = sample[0]['gt_ide']
gt_ide2 = sample[1]['gt_ide']
gt_ide = np.concatenate((gt_ide1, gt_ide2), axis=0)
result['gt_ide'] = gt_ide
return result
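# --- Illustrative usage sketch (added for clarity; not part of the original
# PaddleDetection module). It shows how the Mixup op above can be driven
# directly with two tiny synthetic samples; the dict keys mirror the ones the
# op reads, and all pixel/box values are made up for the demo.
def _example_mixup_usage():
    import numpy as np
    sample_a = {
        'image': np.zeros((32, 48, 3), dtype=np.uint8),
        'gt_bbox': np.array([[2., 2., 10., 10.]], dtype=np.float32),
        'gt_class': np.array([[1]], dtype=np.int32),
    }
    sample_b = {
        'image': np.full((32, 48, 3), 255, dtype=np.uint8),
        'gt_bbox': np.array([[5., 5., 20., 20.]], dtype=np.float32),
        'gt_class': np.array([[2]], dtype=np.int32),
    }
    mixed = Mixup(alpha=1.5, beta=1.5)([sample_a, sample_b])
    # 'gt_score' now holds the per-box mixing weights (factor and 1 - factor).
    return mixed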
@register_op
class NormalizeBox(BaseOperator):
"""Transform the bounding box's coornidates to [0,1]."""
def __init__(self):
super(NormalizeBox, self).__init__()
def apply(self, sample, context):
im = sample['image']
if 'gt_bbox' in sample.keys():
gt_bbox = sample['gt_bbox']
height, width, _ = im.shape
for i in range(gt_bbox.shape[0]):
gt_bbox[i][0] = gt_bbox[i][0] / width
gt_bbox[i][1] = gt_bbox[i][1] / height
gt_bbox[i][2] = gt_bbox[i][2] / width
gt_bbox[i][3] = gt_bbox[i][3] / height
sample['gt_bbox'] = gt_bbox
if 'gt_keypoint' in sample.keys():
gt_keypoint = sample['gt_keypoint']
for i in range(gt_keypoint.shape[1]):
if i % 2:
gt_keypoint[:, i] = gt_keypoint[:, i] / height
else:
gt_keypoint[:, i] = gt_keypoint[:, i] / width
sample['gt_keypoint'] = gt_keypoint
return sample
else:
return sample
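# --- Worked example (illustrative, not part of the original module): for a
# 100x200 (HxW) image, NormalizeBox divides x coordinates by the width (200)
# and y coordinates by the height (100), so the pixel box [50, 20, 150, 80]
# becomes [0.25, 0.2, 0.75, 0.8]. Assumes the usual BaseOperator __call__
# dispatch defined earlier in this module.
def _example_normalize_box():
    import numpy as np
    sample = {
        'image': np.zeros((100, 200, 3), dtype=np.uint8),
        'gt_bbox': np.array([[50., 20., 150., 80.]], dtype=np.float32),
    }
    return NormalizeBox()(sample)['gt_bbox']  # -> [[0.25, 0.2, 0.75, 0.8]]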
@register_op
class BboxXYXY2XYWH(BaseOperator):
"""
Convert bbox XYXY format to XYWH format.
"""
def __init__(self):
super(BboxXYXY2XYWH, self).__init__()
def apply(self, sample, context=None):
if 'gt_bbox' in sample.keys():
bbox = sample['gt_bbox']
bbox[:, 2:4] = bbox[:, 2:4] - bbox[:, :2]
bbox[:, :2] = bbox[:, :2] + bbox[:, 2:4] / 2.
sample['gt_bbox'] = bbox
return sample
else:
return sample
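# --- Worked example (illustrative, not part of the original module): the op
# above rewrites [x0, y0, x1, y1] in place as [center_x, center_y, w, h].
# E.g. [10, 20, 50, 60] has width/height (40, 40) and center (30, 40), so it
# becomes [30, 40, 40, 40].
def _example_xyxy_to_cxcywh():
    import numpy as np
    sample = {'gt_bbox': np.array([[10., 20., 50., 60.]], dtype=np.float32)}
    return BboxXYXY2XYWH()(sample)['gt_bbox']  # -> [[30., 40., 40., 40.]]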
@register_op
class PadBox(BaseOperator):
def __init__(self, num_max_boxes=50):
"""
Pad zeros to bboxes if number of bboxes is less than num_max_boxes.
Args:
num_max_boxes (int): the max number of bboxes
"""
self.num_max_boxes = num_max_boxes
super(PadBox, self).__init__()
def apply(self, sample, context=None):
assert 'gt_bbox' in sample
bbox = sample['gt_bbox']
gt_num = min(self.num_max_boxes, len(bbox))
num_max = self.num_max_boxes
# fields = context['fields'] if context else []
pad_bbox = np.zeros((num_max, 4), dtype=np.float32)
if gt_num > 0:
pad_bbox[:gt_num, :] = bbox[:gt_num, :]
sample['gt_bbox'] = pad_bbox
if 'gt_class' in sample:
pad_class = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_class[:gt_num] = sample['gt_class'][:gt_num, 0]
sample['gt_class'] = pad_class
if 'gt_score' in sample:
pad_score = np.zeros((num_max, ), dtype=np.float32)
if gt_num > 0:
pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]
sample['gt_score'] = pad_score
        # In training, ops such as ExpandImage expand gt_bbox and gt_class,
        # but not the difficult field, so pad it based on its own length.
if 'difficult' in sample:
pad_diff = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]
sample['difficult'] = pad_diff
if 'is_crowd' in sample:
pad_crowd = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_crowd[:gt_num] = sample['is_crowd'][:gt_num, 0]
sample['is_crowd'] = pad_crowd
if 'gt_ide' in sample:
pad_ide = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_ide[:gt_num] = sample['gt_ide'][:gt_num, 0]
sample['gt_ide'] = pad_ide
return sample
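# --- Illustrative sketch (not part of the original module): PadBox pads a
# two-box sample to fixed-size arrays so that batch shapes stay static. The
# values are synthetic; only 'gt_bbox' and 'gt_class' are provided here.
def _example_pad_box():
    import numpy as np
    sample = {
        'gt_bbox': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]], dtype=np.float32),
        'gt_class': np.array([[0], [1]], dtype=np.int32),
    }
    out = PadBox(num_max_boxes=4)(sample)
    # out['gt_bbox'].shape == (4, 4), out['gt_class'].shape == (4,);
    # the last two rows/entries are zero padding.
    return out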
@register_op
class DebugVisibleImage(BaseOperator):
"""
In debug mode, visualize images according to `gt_box`.
    (Currently only supported when the image is not cropped or flipped.)
"""
def __init__(self, output_dir='output/debug', is_normalized=False):
super(DebugVisibleImage, self).__init__()
self.is_normalized = is_normalized
self.output_dir = output_dir
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
if not isinstance(self.is_normalized, bool):
raise TypeError("{}: input type is invalid.".format(self))
def apply(self, sample, context=None):
image = Image.fromarray(sample['image'].astype(np.uint8))
out_file_name = '{:012d}.jpg'.format(sample['im_id'][0])
width = sample['w']
height = sample['h']
gt_bbox = sample['gt_bbox']
gt_class = sample['gt_class']
draw = ImageDraw.Draw(image)
for i in range(gt_bbox.shape[0]):
if self.is_normalized:
gt_bbox[i][0] = gt_bbox[i][0] * width
gt_bbox[i][1] = gt_bbox[i][1] * height
gt_bbox[i][2] = gt_bbox[i][2] * width
gt_bbox[i][3] = gt_bbox[i][3] * height
xmin, ymin, xmax, ymax = gt_bbox[i]
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
(xmin, ymin)],
width=2,
fill='green')
# draw label
text = str(gt_class[i][0])
tw, th = imagedraw_textsize_c(draw, text)
draw.rectangle(
[(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill='green')
draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
if 'gt_keypoint' in sample.keys():
gt_keypoint = sample['gt_keypoint']
if self.is_normalized:
for i in range(gt_keypoint.shape[1]):
if i % 2:
gt_keypoint[:, i] = gt_keypoint[:, i] * height
else:
gt_keypoint[:, i] = gt_keypoint[:, i] * width
for i in range(gt_keypoint.shape[0]):
keypoint = gt_keypoint[i]
for j in range(int(keypoint.shape[0] / 2)):
x1 = round(keypoint[2 * j]).astype(np.int32)
y1 = round(keypoint[2 * j + 1]).astype(np.int32)
draw.ellipse(
(x1, y1, x1 + 5, y1 + 5), fill='green', outline='green')
save_path = os.path.join(self.output_dir, out_file_name)
image.save(save_path, quality=95)
return sample
@register_op
class Pad(BaseOperator):
def __init__(self,
size=None,
size_divisor=32,
pad_mode=0,
offsets=None,
fill_value=(127.5, 127.5, 127.5)):
"""
Pad image to a specified size or multiple of size_divisor.
Args:
size (int, Sequence): image target size, if None, pad to multiple of size_divisor, default None
size_divisor (int): size divisor, default 32
            pad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2]. if -1, use specified offsets;
                if 0, only pad to right and bottom; if 1, pad according to center; if 2, only pad left and top
            offsets (list): [offset_x, offset_y], specify offsets while padding, only used when pad_mode=-1
            fill_value (tuple): rgb value of the pad area, default (127.5, 127.5, 127.5)
"""
super(Pad, self).__init__()
        if size is not None and not isinstance(size, (int, Sequence)):
            raise TypeError(
                "Type of size is invalid. Must be Integer, List or Tuple, "
                "now is {}".format(type(size)))
if isinstance(size, int):
size = [size, size]
assert pad_mode in [
-1, 0, 1, 2
], 'currently only supports four modes [-1, 0, 1, 2]'
if pad_mode == -1:
assert offsets, 'if pad_mode is -1, offsets should not be None'
self.size = size
self.size_divisor = size_divisor
self.pad_mode = pad_mode
self.fill_value = fill_value
self.offsets = offsets
def apply_segm(self, segms, offsets, im_size, size):
def _expand_poly(poly, x, y):
expanded_poly = np.array(poly)
expanded_poly[0::2] += x
expanded_poly[1::2] += y
return expanded_poly.tolist()
def _expand_rle(rle, x, y, height, width, h, w):
if 'counts' in rle and type(rle['counts']) == list:
rle = mask_util.frPyObjects(rle, height, width)
mask = mask_util.decode(rle)
expanded_mask = np.full((h, w), 0).astype(mask.dtype)
expanded_mask[y:y + height, x:x + width] = mask
rle = mask_util.encode(
np.array(
expanded_mask, order='F', dtype=np.uint8))
return rle
x, y = offsets
height, width = im_size
h, w = size
expanded_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
expanded_segms.append(
[_expand_poly(poly, x, y) for poly in segm])
else:
# RLE format
import pycocotools.mask as mask_util
expanded_segms.append(
_expand_rle(segm, x, y, height, width, h, w))
return expanded_segms
def apply_bbox(self, bbox, offsets):
return bbox + np.array(offsets * 2, dtype=np.float32)
def apply_keypoint(self, keypoints, offsets):
n = len(keypoints[0]) // 2
return keypoints + np.array(offsets * n, dtype=np.float32)
def apply_image(self, image, offsets, im_size, size):
x, y = offsets
im_h, im_w = im_size
h, w = size
canvas = np.ones((h, w, 3), dtype=np.float32)
canvas *= np.array(self.fill_value, dtype=np.float32)
canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
return canvas
def apply(self, sample, context=None):
im = sample['image']
im_h, im_w = im.shape[:2]
if self.size:
h, w = self.size
assert (
im_h <= h and im_w <= w
), '(h, w) of target size should be greater than (im_h, im_w)'
else:
h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor)
w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor)
if h == im_h and w == im_w:
sample['image'] = im.astype(np.float32)
return sample
if self.pad_mode == -1:
offset_x, offset_y = self.offsets
elif self.pad_mode == 0:
offset_y, offset_x = 0, 0
elif self.pad_mode == 1:
offset_y, offset_x = (h - im_h) // 2, (w - im_w) // 2
else:
offset_y, offset_x = h - im_h, w - im_w
offsets, im_size, size = [offset_x, offset_y], [im_h, im_w], [h, w]
sample['image'] = self.apply_image(im, offsets, im_size, size)
if self.pad_mode == 0:
return sample
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], offsets)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], offsets,
im_size, size)
if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:
sample['gt_keypoint'] = self.apply_keypoint(sample['gt_keypoint'],
offsets)
return sample
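# --- Illustrative sketch (not part of the original module): padding a
# 375x500 image onto a fixed 512x512 canvas with the default bottom/right
# padding (pad_mode=0), so existing boxes keep their coordinates. With
# size=None the op would instead round each side up to a multiple of
# size_divisor (e.g. 375x500 -> 384x512 for size_divisor=32).
def _example_pad_to_fixed_size():
    import numpy as np
    sample = {
        'image': np.zeros((375, 500, 3), dtype=np.float32),
        'gt_bbox': np.array([[10., 10., 100., 100.]], dtype=np.float32),
    }
    out = Pad(size=[512, 512], pad_mode=0)(sample)
    # out['image'].shape == (512, 512, 3); gt_bbox is unchanged because only
    # the right and bottom edges are padded.
    return out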
@register_op
class Poly2Mask(BaseOperator):
"""
gt poly to mask annotations.
Args:
del_poly (bool): Whether to delete poly after generating mask. Default: False.
"""
def __init__(self, del_poly=False):
super(Poly2Mask, self).__init__()
import pycocotools.mask as maskUtils
self.maskutils = maskUtils
self.del_poly = del_poly
def _poly2mask(self, mask_ann, img_h, img_w):
if isinstance(mask_ann, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = self.maskutils.frPyObjects(mask_ann, img_h, img_w)
rle = self.maskutils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = self.maskutils.frPyObjects(mask_ann, img_h, img_w)
else:
# rle
rle = mask_ann
mask = self.maskutils.decode(rle)
return mask
def apply(self, sample, context=None):
assert 'gt_poly' in sample
im_h, im_w = sample['im_shape']
masks = [
self._poly2mask(gt_poly, im_h, im_w)
for gt_poly in sample['gt_poly']
]
sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
if self.del_poly:
del (sample['gt_poly'])
return sample
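# --- Illustrative sketch (not part of the original module; needs pycocotools
# installed). It converts a single square polygon into a binary gt_segm mask
# for an 8x8 image. 'im_shape' is kept integral here purely for the demo; the
# polygon coordinates are made up.
def _example_poly2mask():
    import numpy as np
    sample = {
        'im_shape': np.array([8, 8], dtype=np.int32),
        # one object made of one polygon part: a 4x4 square at (2, 2)
        'gt_poly': [[[2.0, 2.0, 6.0, 2.0, 6.0, 6.0, 2.0, 6.0]]],
    }
    out = Poly2Mask()(sample)
    return out['gt_segm']  # shape (1, 8, 8), dtype uint8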
@register_op
class AugmentHSV(BaseOperator):
"""
    Augment the HSV channels of image data.
    Args:
        fraction (float): jitter fraction for the S and V channels, used when
            the explicit gains are not given. Default: 0.5.
        is_bgr (bool): whether the image is in BGR format. Default: True.
        hgain (float): H channel gain; if None, fall back to the fraction mode.
        sgain (float): S channel gain
        vgain (float): V channel gain
"""
def __init__(self,
fraction=0.50,
is_bgr=True,
hgain=None,
sgain=None,
vgain=None):
super(AugmentHSV, self).__init__()
self.fraction = fraction
self.is_bgr = is_bgr
self.hgain = hgain
self.sgain = sgain
self.vgain = vgain
self.use_hsvgain = False if hgain is None else True
def apply(self, sample, context=None):
img = sample['image']
if self.is_bgr:
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
else:
img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
if self.use_hsvgain:
hsv_augs = np.random.uniform(
-1, 1, 3) * [self.hgain, self.sgain, self.vgain]
# random selection of h, s, v
hsv_augs *= np.random.randint(0, 2, 3)
img_hsv[..., 0] = (img_hsv[..., 0] + hsv_augs[0]) % 180
img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_augs[1], 0, 255)
img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_augs[2], 0, 255)
else:
S = img_hsv[:, :, 1].astype(np.float32)
V = img_hsv[:, :, 2].astype(np.float32)
a = (random.random() * 2 - 1) * self.fraction + 1
S *= a
if a > 1:
np.clip(S, a_min=0, a_max=255, out=S)
a = (random.random() * 2 - 1) * self.fraction + 1
V *= a
if a > 1:
np.clip(V, a_min=0, a_max=255, out=V)
img_hsv[:, :, 1] = S.astype(np.uint8)
img_hsv[:, :, 2] = V.astype(np.uint8)
if self.is_bgr:
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
else:
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB, dst=img)
sample['image'] = img.astype(np.float32)
return sample
@register_op
class Norm2PixelBbox(BaseOperator):
"""
    Transform the bounding box's coordinates from the range [0, 1] to pixels.
"""
def __init__(self):
super(Norm2PixelBbox, self).__init__()
def apply(self, sample, context=None):
assert 'gt_bbox' in sample
bbox = sample['gt_bbox']
height, width = sample['image'].shape[:2]
bbox[:, 0::2] = bbox[:, 0::2] * width
bbox[:, 1::2] = bbox[:, 1::2] * height
sample['gt_bbox'] = bbox
return sample
@register_op
class BboxCXCYWH2XYXY(BaseOperator):
"""
Convert bbox CXCYWH format to XYXY format.
[center_x, center_y, width, height] -> [x0, y0, x1, y1]
"""
def __init__(self):
super(BboxCXCYWH2XYXY, self).__init__()
def apply(self, sample, context=None):
assert 'gt_bbox' in sample
bbox0 = sample['gt_bbox']
bbox = bbox0.copy()
bbox[:, :2] = bbox0[:, :2] - bbox0[:, 2:4] / 2.
bbox[:, 2:4] = bbox0[:, :2] + bbox0[:, 2:4] / 2.
sample['gt_bbox'] = bbox
return sample
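# --- Worked example (illustrative, not part of the original module): this op
# is the inverse of BboxXYXY2XYWH above. A [center_x, center_y, w, h] box
# [30, 40, 40, 40] becomes the corner box [10, 20, 50, 60].
def _example_cxcywh_to_xyxy():
    import numpy as np
    sample = {'gt_bbox': np.array([[30., 40., 40., 40.]], dtype=np.float32)}
    return BboxCXCYWH2XYXY()(sample)['gt_bbox']  # -> [[10., 20., 50., 60.]]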
@register_op
class RandomResizeCrop(BaseOperator):
"""Random resize and crop image and bboxes.
Args:
resizes (list): resize image to one of resizes. if keep_ratio is True and mode is
'long', resize the image's long side to the maximum of target_size, if keep_ratio is
True and mode is 'short', resize the image's short side to the minimum of target_size.
cropsizes (list): crop sizes after resize, [(min_crop_1, max_crop_1), ...]
        mode (str): resize mode, `long` or `short`; see `resizes` for details.
prob (float): probability of this op.
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): the interpolation method
thresholds (list): iou thresholds for decide a valid bbox crop.
num_attempts (int): number of tries before giving up.
allow_no_crop (bool): allow return without actually cropping them.
cover_all_box (bool): ensure all bboxes are covered in the final crop.
        is_mask_crop (bool): whether to crop the segmentation masks.
        ioumode (str): overlap metric used to validate a crop, 'iou' or 'iof'.
"""
def __init__(self,
resizes,
cropsizes,
prob=0.5,
mode='short',
keep_ratio=True,
interp=cv2.INTER_LINEAR,
num_attempts=3,
cover_all_box=False,
allow_no_crop=False,
thresholds=[0.3, 0.5, 0.7],
is_mask_crop=False,
ioumode="iou"):
super(RandomResizeCrop, self).__init__()
self.resizes = resizes
self.cropsizes = cropsizes
self.prob = prob
self.mode = mode
self.ioumode = ioumode
self.resizer = Resize(0, keep_ratio=keep_ratio, interp=interp)
self.croper = RandomCrop(
num_attempts=num_attempts,
cover_all_box=cover_all_box,
thresholds=thresholds,
allow_no_crop=allow_no_crop,
is_mask_crop=is_mask_crop)
def _format_size(self, size):
if isinstance(size, Integral):
size = (size, size)
return size
def apply(self, sample, context=None):
if random.random() < self.prob:
_resize = self._format_size(random.choice(self.resizes))
_cropsize = self._format_size(random.choice(self.cropsizes))
sample = self._resize(
self.resizer,
sample,
size=_resize,
mode=self.mode,
context=context)
sample = self._random_crop(
self.croper, sample, size=_cropsize, context=context)
return sample
@staticmethod
def _random_crop(croper, sample, size, context=None):
if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
return sample
self = croper
h, w = sample['image'].shape[:2]
gt_bbox = sample['gt_bbox']
cropsize = size
min_crop = min(cropsize)
max_crop = max(cropsize)
thresholds = list(self.thresholds)
np.random.shuffle(thresholds)
for thresh in thresholds:
found = False
for _ in range(self.num_attempts):
crop_h = random.randint(min_crop, min(h, max_crop))
crop_w = random.randint(min_crop, min(w, max_crop))
crop_y = random.randint(0, h - crop_h)
crop_x = random.randint(0, w - crop_w)
crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
if self.ioumode == "iof":
iou = self._gtcropiou_matrix(
gt_bbox, np.array(
[crop_box], dtype=np.float32))
elif self.ioumode == "iou":
iou = self._iou_matrix(
gt_bbox, np.array(
[crop_box], dtype=np.float32))
if iou.max() < thresh:
continue
if self.cover_all_box and iou.min() < thresh:
continue
cropped_box, valid_ids = self._crop_box_with_center_constraint(
gt_bbox, np.array(
crop_box, dtype=np.float32))
if valid_ids.size > 0:
found = True
break
if found:
if self.is_mask_crop and 'gt_poly' in sample and len(sample[
'gt_poly']) > 0:
crop_polys = self.crop_segms(
sample['gt_poly'],
valid_ids,
np.array(
crop_box, dtype=np.int64),
h,
w)
if [] in crop_polys:
delete_id = list()
valid_polys = list()
for id, crop_poly in enumerate(crop_polys):
if crop_poly == []:
delete_id.append(id)
else:
valid_polys.append(crop_poly)
valid_ids = np.delete(valid_ids, delete_id)
if len(valid_polys) == 0:
return sample
sample['gt_poly'] = valid_polys
else:
sample['gt_poly'] = crop_polys
if 'gt_segm' in sample:
sample['gt_segm'] = self._crop_segm(sample['gt_segm'],
crop_box)
sample['gt_segm'] = np.take(
sample['gt_segm'], valid_ids, axis=0)
sample['image'] = self._crop_image(sample['image'], crop_box)
sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
sample['gt_class'] = np.take(
sample['gt_class'], valid_ids, axis=0)
if 'gt_score' in sample:
sample['gt_score'] = np.take(
sample['gt_score'], valid_ids, axis=0)
if 'is_crowd' in sample:
sample['is_crowd'] = np.take(
sample['is_crowd'], valid_ids, axis=0)
if 'gt_areas' in sample:
sample['gt_areas'] = np.take(
sample['gt_areas'], valid_ids, axis=0)
if 'gt_joints' in sample:
gt_joints = self._crop_joints(sample['gt_joints'], crop_box)
sample['gt_joints'] = gt_joints[valid_ids]
return sample
return sample
@staticmethod
def _resize(resizer, sample, size, mode='short', context=None):
self = resizer
im = sample['image']
target_size = size
if not isinstance(im, np.ndarray):
raise TypeError("{}: image type is not numpy.".format(self))
if len(im.shape) != 3:
raise ImageError('{}: image is not 3-dimensional.'.format(self))
# apply image
im_shape = im.shape
if self.keep_ratio:
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
target_size_min = np.min(target_size)
target_size_max = np.max(target_size)
if mode == 'long':
im_scale = min(target_size_min / im_size_min,
target_size_max / im_size_max)
else:
im_scale = max(target_size_min / im_size_min,
target_size_max / im_size_max)
resize_h = int(im_scale * float(im_shape[0]) + 0.5)
resize_w = int(im_scale * float(im_shape[1]) + 0.5)
im_scale_x = im_scale
im_scale_y = im_scale
else:
resize_h, resize_w = target_size
im_scale_y = resize_h / im_shape[0]
im_scale_x = resize_w / im_shape[1]
im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])
sample['image'] = im
sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)
if 'scale_factor' in sample:
scale_factor = sample['scale_factor']
sample['scale_factor'] = np.asarray(
[scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
dtype=np.float32)
else:
sample['scale_factor'] = np.asarray(
[im_scale_y, im_scale_x], dtype=np.float32)
# apply bbox
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'],
[im_scale_x, im_scale_y],
[resize_w, resize_h])
# apply polygon
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_shape[:2],
[im_scale_x, im_scale_y])
# apply semantic
if 'semantic' in sample and sample['semantic']:
semantic = sample['semantic']
semantic = cv2.resize(
semantic.astype('float32'),
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
semantic = np.asarray(semantic).astype('int32')
semantic = np.expand_dims(semantic, 0)
sample['semantic'] = semantic
# apply gt_segm
if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
masks = [
cv2.resize(
gt_segm,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=cv2.INTER_NEAREST)
for gt_segm in sample['gt_segm']
]
sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
if 'gt_joints' in sample:
sample['gt_joints'] = self.apply_joints(sample['gt_joints'],
[im_scale_x, im_scale_y],
[resize_w, resize_h])
return sample
@register_op
class RandomSelect(BaseOperator):
"""
Randomly choose a transformation between transforms1 and transforms2,
and the probability of choosing transforms1 is p.
The code is based on https://github.com/facebookresearch/detr/blob/main/datasets/transforms.py
"""
def __init__(self, transforms1, transforms2, p=0.5):
super(RandomSelect, self).__init__()
self.transforms1 = Compose(transforms1)
self.transforms2 = Compose(transforms2)
self.p = p
def apply(self, sample, context=None):
if random.random() < self.p:
return self.transforms1(sample)
return self.transforms2(sample)
@register_op
class RandomSelects(BaseOperator):
"""
    Randomly choose one transformation from transforms_list. If p is given, it
    must align with transforms_list and is used as per-transform selection
    thresholds against a single uniform draw; otherwise a transformation is
    chosen uniformly at random.
    The code is based on https://github.com/facebookresearch/detr/blob/main/datasets/transforms.py
"""
def __init__(self, transforms_list, p=None):
super(RandomSelects, self).__init__()
if p is not None:
assert isinstance(p, (list, tuple))
assert len(transforms_list) == len(p)
else:
assert len(transforms_list) > 0
self.transforms = [Compose(t) for t in transforms_list]
self.p = p
def apply(self, sample, context=None):
if self.p is None:
return random.choice(self.transforms)(sample)
else:
prob = random.random()
for p, t in zip(self.p, self.transforms):
if prob <= p:
return t(sample)
@register_op
class RandomShortSideResize(BaseOperator):
def __init__(self,
short_side_sizes,
max_size=None,
interp=cv2.INTER_LINEAR,
random_interp=False):
"""
        Resize the image randomly according to the short side. If max_size is not None,
        the long side is capped at max_size. The whole process keeps the aspect ratio.
Args:
short_side_sizes (list|tuple): Image target short side size.
max_size (int): The size of the longest side of image after resize.
interp (int): The interpolation method.
            random_interp (bool): Whether to randomly select the interpolation method.
"""
super(RandomShortSideResize, self).__init__()
assert isinstance(short_side_sizes,
Sequence), "short_side_sizes must be List or Tuple"
self.short_side_sizes = short_side_sizes
self.max_size = max_size
self.interp = interp
self.random_interp = random_interp
self.interps = [
cv2.INTER_NEAREST,
cv2.INTER_LINEAR,
cv2.INTER_AREA,
cv2.INTER_CUBIC,
cv2.INTER_LANCZOS4,
]
def get_size_with_aspect_ratio(self, image_shape, size, max_size=None):
h, w = image_shape
max_clip = False
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(max_size * min_original_size / max_original_size)
max_clip = True
if (w <= h and w == size) or (h <= w and h == size):
return (w, h)
if w < h:
ow = size
oh = int(round(size * h / w)) if not max_clip else max_size
else:
oh = size
ow = int(round(size * w / h)) if not max_clip else max_size
return (ow, oh)
def resize(self,
sample,
target_size,
max_size=None,
interp=cv2.INTER_LINEAR):
im = sample['image']
if not isinstance(im, np.ndarray):
raise TypeError("{}: image type is not numpy.".format(self))
if len(im.shape) != 3:
raise ImageError('{}: image is not 3-dimensional.'.format(self))
target_size = self.get_size_with_aspect_ratio(im.shape[:2], target_size,
max_size)
im_scale_y, im_scale_x = target_size[1] / im.shape[0], target_size[
0] / im.shape[1]
sample['image'] = cv2.resize(im, target_size, interpolation=interp)
sample['im_shape'] = np.asarray(target_size[::-1], dtype=np.float32)
if 'scale_factor' in sample:
scale_factor = sample['scale_factor']
sample['scale_factor'] = np.asarray(
[scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
dtype=np.float32)
else:
sample['scale_factor'] = np.asarray(
[im_scale_y, im_scale_x], dtype=np.float32)
# apply bbox
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(
sample['gt_bbox'], [im_scale_x, im_scale_y], target_size)
# apply polygon
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im.shape[:2],
[im_scale_x, im_scale_y])
# apply semantic
if 'semantic' in sample and sample['semantic']:
semantic = sample['semantic']
semantic = cv2.resize(
semantic.astype('float32'),
target_size,
interpolation=self.interp)
semantic = np.asarray(semantic).astype('int32')
semantic = np.expand_dims(semantic, 0)
sample['semantic'] = semantic
# apply gt_segm
if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
masks = [
cv2.resize(
gt_segm, target_size, interpolation=cv2.INTER_NEAREST)
for gt_segm in sample['gt_segm']
]
sample['gt_segm'] = np.asarray(masks).astype(np.uint8)
if 'gt_joints' in sample:
sample['gt_joints'] = self.apply_joints(
sample['gt_joints'], [im_scale_x, im_scale_y], target_size)
# apply areas
if 'gt_areas' in sample:
sample['gt_areas'] = self.apply_area(sample['gt_areas'],
[im_scale_x, im_scale_y])
return sample
def apply_bbox(self, bbox, scale, size):
im_scale_x, im_scale_y = scale
resize_w, resize_h = size
bbox[:, 0::2] *= im_scale_x
bbox[:, 1::2] *= im_scale_y
bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, resize_w)
bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, resize_h)
return bbox.astype('float32')
def apply_joints(self, joints, scale, size):
im_scale_x, im_scale_y = scale
resize_w, resize_h = size
joints[..., 0] *= im_scale_x
joints[..., 1] *= im_scale_y
# joints[joints[..., 0] >= resize_w, :] = 0
# joints[joints[..., 1] >= resize_h, :] = 0
# joints[joints[..., 0] < 0, :] = 0
# joints[joints[..., 1] < 0, :] = 0
joints[..., 0] = np.clip(joints[..., 0], 0, resize_w)
joints[..., 1] = np.clip(joints[..., 1], 0, resize_h)
return joints
def apply_area(self, area, scale):
im_scale_x, im_scale_y = scale
return area * im_scale_x * im_scale_y
def apply_segm(self, segms, im_size, scale):
def _resize_poly(poly, im_scale_x, im_scale_y):
resized_poly = np.array(poly).astype('float32')
resized_poly[0::2] *= im_scale_x
resized_poly[1::2] *= im_scale_y
return resized_poly.tolist()
def _resize_rle(rle, im_h, im_w, im_scale_x, im_scale_y):
if 'counts' in rle and type(rle['counts']) == list:
rle = mask_util.frPyObjects(rle, im_h, im_w)
mask = mask_util.decode(rle)
mask = cv2.resize(
mask,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
return rle
im_h, im_w = im_size
im_scale_x, im_scale_y = scale
resized_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
resized_segms.append([
_resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
])
else:
# RLE format
import pycocotools.mask as mask_util
resized_segms.append(
_resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))
return resized_segms
def apply(self, sample, context=None):
target_size = random.choice(self.short_side_sizes)
interp = random.choice(
self.interps) if self.random_interp else self.interp
return self.resize(sample, target_size, self.max_size, interp)
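# --- Worked example (illustrative, not part of the original module) of the
# size arithmetic in get_size_with_aspect_ratio above: for a 480x640 (HxW)
# image, a target short side of 800 and max_size=1333, the short side is
# scaled to 800 and the long side becomes round(800 * 640 / 480) = 1067,
# which stays below max_size, so the returned (width, height) is (1067, 800).
def _example_short_side_size():
    op = RandomShortSideResize(short_side_sizes=[800], max_size=1333)
    return op.get_size_with_aspect_ratio((480, 640), 800, max_size=1333)
    # -> (1067, 800)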
@register_op
class RandomShortSideRangeResize(RandomShortSideResize):
def __init__(self, scales, interp=cv2.INTER_LINEAR, random_interp=False):
"""
        Resize the image randomly according to the short side, sampling the target
        short and long edges from the ranges spanned by `scales`. The whole process
        keeps the aspect ratio.
        Args:
            scales (list[tuple]): candidate (short, long) edge sizes; the short edge is
                sampled between the smallest values and the long edge between the largest.
            interp (int): The interpolation method.
            random_interp (bool): Whether to randomly select the interpolation method.
"""
super(RandomShortSideRangeResize, self).__init__(scales, None, interp,
random_interp)
assert isinstance(scales,
Sequence), "short_side_sizes must be List or Tuple"
self.scales = scales
def random_sample(self, img_scales):
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long), max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short), max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale
def apply(self, sample, context=None):
long_edge, short_edge = self.random_sample(self.short_side_sizes)
# print("target size:{}".format((long_edge, short_edge)))
interp = random.choice(
self.interps) if self.random_interp else self.interp
return self.resize(sample, short_edge, long_edge, interp)
@register_op
class RandomSizeCrop(BaseOperator):
"""
Cut the image randomly according to `min_size` and `max_size`
Args:
min_size (int): Min size for edges of cropped image.
        max_size (int): Max size for edges of the cropped image. If it is
            larger than the corresponding input image edge, the output keeps
            the original length.
        keep_empty (bool): Whether to keep a cropped result with no object.
            If set to False, the no-object result is not returned and the
            original input is used instead.
"""
def __init__(self, min_size, max_size, keep_empty=True):
super(RandomSizeCrop, self).__init__()
self.min_size = min_size
self.max_size = max_size
self.keep_empty = keep_empty
from paddle.vision.transforms.functional import crop as paddle_crop
self.paddle_crop = paddle_crop
@staticmethod
def get_crop_params(img_shape, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img_shape (list|tuple): Image's height and width.
output_size (list|tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
h, w = img_shape
th, tw = output_size
if h + 1 < th or w + 1 < tw:
raise ValueError(
"Required crop size {} is larger then input image size {}".
format((th, tw), (h, w)))
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th + 1)
j = random.randint(0, w - tw + 1)
return i, j, th, tw
def crop(self, sample, region):
keep_index = None
# apply bbox and check whether the cropped result is valid
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
croped_bbox = self.apply_bbox(sample['gt_bbox'], region)
bbox = croped_bbox.reshape([-1, 2, 2])
area = (bbox[:, 1, :] - bbox[:, 0, :]).prod(axis=1)
keep_index = np.where(area > 0)[0]
if not self.keep_empty and len(keep_index) == 0:
# When keep_empty is set to False, cropped with no-object will
# not be used and return the origin content.
return sample
sample['gt_bbox'] = croped_bbox[keep_index] if len(
keep_index) > 0 else np.zeros(
[0, 4], dtype=np.float32)
sample['gt_class'] = sample['gt_class'][keep_index] if len(
keep_index) > 0 else np.zeros(
[0, 1], dtype=np.float32)
if 'gt_score' in sample:
sample['gt_score'] = sample['gt_score'][keep_index] if len(
keep_index) > 0 else np.zeros(
[0, 1], dtype=np.float32)
if 'is_crowd' in sample:
sample['is_crowd'] = sample['is_crowd'][keep_index] if len(
keep_index) > 0 else np.zeros(
[0, 1], dtype=np.float32)
if 'gt_areas' in sample:
sample['gt_areas'] = np.take(
sample['gt_areas'], keep_index, axis=0)
image_shape = sample['image'].shape[:2]
sample['image'] = self.paddle_crop(sample['image'], *region)
sample['im_shape'] = np.array(
sample['image'].shape[:2], dtype=np.float32)
# apply polygon
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], region,
image_shape)
sample['gt_poly'] = np.array(sample['gt_poly'])
if keep_index is not None and len(keep_index) > 0:
sample['gt_poly'] = sample['gt_poly'][keep_index]
sample['gt_poly'] = sample['gt_poly'].tolist()
# apply gt_segm
if 'gt_segm' in sample and len(sample['gt_segm']) > 0:
i, j, h, w = region
sample['gt_segm'] = sample['gt_segm'][:, i:i + h, j:j + w]
if keep_index is not None and len(keep_index) > 0:
sample['gt_segm'] = sample['gt_segm'][keep_index]
if 'gt_joints' in sample:
gt_joints = self._crop_joints(sample['gt_joints'], region)
sample['gt_joints'] = gt_joints
if keep_index is not None:
sample['gt_joints'] = sample['gt_joints'][keep_index]
return sample
def apply_bbox(self, bbox, region):
i, j, h, w = region
region_size = np.asarray([w, h])
crop_bbox = bbox - np.asarray([j, i, j, i])
crop_bbox = np.minimum(crop_bbox.reshape([-1, 2, 2]), region_size)
crop_bbox = crop_bbox.clip(min=0)
return crop_bbox.reshape([-1, 4]).astype('float32')
def _crop_joints(self, joints, region):
y1, x1, h, w = region
x2 = x1 + w
y2 = y1 + h
# x1, y1, x2, y2 = crop
joints[..., 0] -= x1
joints[..., 1] -= y1
joints[joints[..., 0] > w, :] = 0
joints[joints[..., 1] > h, :] = 0
joints[joints[..., 0] < 0, :] = 0
joints[joints[..., 1] < 0, :] = 0
return joints
def apply_segm(self, segms, region, image_shape):
def _crop_poly(segm, crop):
xmin, ymin, xmax, ymax = crop
crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]
crop_p = np.array(crop_coord).reshape(4, 2)
crop_p = Polygon(crop_p)
crop_segm = list()
for poly in segm:
poly = np.array(poly).reshape(len(poly) // 2, 2)
polygon = Polygon(poly)
if not polygon.is_valid:
exterior = polygon.exterior
multi_lines = exterior.intersection(exterior)
polygons = shapely.ops.polygonize(multi_lines)
polygon = MultiPolygon(polygons)
multi_polygon = list()
if isinstance(polygon, MultiPolygon):
multi_polygon = copy.deepcopy(polygon)
else:
multi_polygon.append(copy.deepcopy(polygon))
for per_polygon in multi_polygon:
inter = per_polygon.intersection(crop_p)
if not inter:
continue
if isinstance(inter, (MultiPolygon, GeometryCollection)):
for part in inter:
if not isinstance(part, Polygon):
continue
part = np.squeeze(
np.array(part.exterior.coords[:-1]).reshape(1,
-1))
part[0::2] -= xmin
part[1::2] -= ymin
crop_segm.append(part.tolist())
elif isinstance(inter, Polygon):
crop_poly = np.squeeze(
np.array(inter.exterior.coords[:-1]).reshape(1, -1))
crop_poly[0::2] -= xmin
crop_poly[1::2] -= ymin
crop_segm.append(crop_poly.tolist())
else:
continue
return crop_segm
def _crop_rle(rle, crop, height, width):
if 'counts' in rle and type(rle['counts']) == list:
rle = mask_util.frPyObjects(rle, height, width)
mask = mask_util.decode(rle)
mask = mask[crop[1]:crop[3], crop[0]:crop[2]]
rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
return rle
i, j, h, w = region
crop = [j, i, j + w, i + h]
height, width = image_shape
crop_segms = []
for segm in segms:
if is_poly(segm):
import copy
import shapely.ops
from shapely.geometry import Polygon, MultiPolygon, GeometryCollection
# Polygon format
crop_segms.append(_crop_poly(segm, crop))
else:
# RLE format
import pycocotools.mask as mask_util
crop_segms.append(_crop_rle(segm, crop, height, width))
return crop_segms
def apply(self, sample, context=None):
h = random.randint(self.min_size,
min(sample['image'].shape[0], self.max_size))
w = random.randint(self.min_size,
min(sample['image'].shape[1], self.max_size))
region = self.get_crop_params(sample['image'].shape[:2], [h, w])
return self.crop(sample, region)
@register_op
class WarpAffine(BaseOperator):
def __init__(self,
keep_res=False,
pad=31,
input_h=512,
input_w=512,
scale=0.4,
shift=0.1,
down_ratio=4):
"""WarpAffine
Warp affine the image
The code is based on https://github.com/xingyizhou/CenterNet/blob/master/src/lib/datasets/sample/ctdet.py
"""
super(WarpAffine, self).__init__()
self.keep_res = keep_res
self.pad = pad
self.input_h = input_h
self.input_w = input_w
self.scale = scale
self.shift = shift
self.down_ratio = down_ratio
def apply(self, sample, context=None):
img = sample['image']
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
h, w = img.shape[:2]
if self.keep_res:
# True in detection eval/infer
input_h = (h | self.pad) + 1
input_w = (w | self.pad) + 1
s = np.array([input_w, input_h], dtype=np.float32)
c = np.array([w // 2, h // 2], dtype=np.float32)
else:
            # False in centertrack eval_mot
s = max(h, w) * 1.0
input_h, input_w = self.input_h, self.input_w
c = np.array([w / 2., h / 2.], dtype=np.float32)
trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
img = cv2.resize(img, (w, h))
inp = cv2.warpAffine(
img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
sample['image'] = inp
if not self.keep_res:
out_h = input_h // self.down_ratio
out_w = input_w // self.down_ratio
trans_output = get_affine_transform(c, s, 0, [out_w, out_h])
sample.update({
'center': c,
'scale': s,
'out_height': out_h,
'out_width': out_w,
'inp_height': input_h,
'inp_width': input_w,
'trans_input': trans_input,
'trans_output': trans_output,
})
return sample
@register_op
class FlipWarpAffine(BaseOperator):
def __init__(self,
keep_res=False,
pad=31,
input_h=512,
input_w=512,
not_rand_crop=False,
scale=0.4,
shift=0.1,
flip=0.5,
is_scale=True,
use_random=True,
add_pre_img=False):
"""FlipWarpAffine
        1. Random Crop
        2. Flip the image horizontally
        3. Warp affine the image
        4. (Optional) Add the previous image
"""
super(FlipWarpAffine, self).__init__()
self.keep_res = keep_res
self.pad = pad
self.input_h = input_h
self.input_w = input_w
self.not_rand_crop = not_rand_crop
self.scale = scale
self.shift = shift
self.flip = flip
self.is_scale = is_scale
self.use_random = use_random
self.add_pre_img = add_pre_img
def __call__(self, samples, context=None):
if self.add_pre_img:
assert isinstance(samples, Sequence) and len(samples) == 2
sample, pre_sample = samples[0], samples[1]
else:
sample = samples
img = sample['image']
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:
return sample
h, w = img.shape[:2]
flipped = 0
if self.keep_res:
input_h = (h | self.pad) + 1
input_w = (w | self.pad) + 1
s = np.array([input_w, input_h], dtype=np.float32)
c = np.array([w // 2, h // 2], dtype=np.float32)
else:
# centernet training default
s = max(h, w) * 1.0
input_h, input_w = self.input_h, self.input_w
c = np.array([w / 2., h / 2.], dtype=np.float32)
if self.use_random:
gt_bbox = sample['gt_bbox']
if not self.not_rand_crop:
# centernet default
s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = get_border(128, w)
h_border = get_border(128, h)
c[0] = np.random.randint(low=w_border, high=w - w_border)
c[1] = np.random.randint(low=h_border, high=h - h_border)
else:
sf = self.scale
cf = self.shift
c[0] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
c[1] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
if np.random.random() < self.flip:
img = img[:, ::-1, :]
c[0] = w - c[0] - 1
oldx1 = gt_bbox[:, 0].copy()
oldx2 = gt_bbox[:, 2].copy()
gt_bbox[:, 0] = w - oldx2 - 1
gt_bbox[:, 2] = w - oldx1 - 1
flipped = 1
sample['gt_bbox'] = gt_bbox
trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
inp = cv2.warpAffine(
img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
if self.is_scale:
inp = (inp.astype(np.float32) / 255.)
sample['image'] = inp
sample['center'] = c
sample['scale'] = s
if self.add_pre_img:
sample['trans_input'] = trans_input
# previous image, use same aug trans_input as current image
pre_img = pre_sample['image']
pre_img = cv2.cvtColor(pre_img, cv2.COLOR_RGB2BGR)
if flipped:
pre_img = pre_img[:, ::-1, :].copy()
pre_inp = cv2.warpAffine(
pre_img,
trans_input, (input_w, input_h),
flags=cv2.INTER_LINEAR)
if self.is_scale:
pre_inp = (pre_inp.astype(np.float32) / 255.)
sample['pre_image'] = pre_inp
# if empty gt_bbox
if 'gt_bbox' in pre_sample and len(pre_sample['gt_bbox']) == 0:
return sample
pre_gt_bbox = pre_sample['gt_bbox']
            if flipped:
                pre_oldx1 = pre_gt_bbox[:, 0].copy()
                pre_oldx2 = pre_gt_bbox[:, 2].copy()
                # mirror the same way as the current frame: swap x1/x2 so the
                # flipped boxes keep x1 <= x2
                pre_gt_bbox[:, 0] = w - pre_oldx2 - 1
                pre_gt_bbox[:, 2] = w - pre_oldx1 - 1
sample['pre_gt_bbox'] = pre_gt_bbox
sample['pre_gt_class'] = pre_sample['gt_class']
sample['pre_gt_track_id'] = pre_sample['gt_track_id']
del pre_sample
return sample
@register_op
class CenterRandColor(BaseOperator):
"""Random color for CenterNet series models.
Args:
saturation (float): saturation settings.
contrast (float): contrast settings.
brightness (float): brightness settings.
"""
def __init__(self, saturation=0.4, contrast=0.4, brightness=0.4):
super(CenterRandColor, self).__init__()
self.saturation = saturation
self.contrast = contrast
self.brightness = brightness
def apply_saturation(self, img, img_gray):
alpha = 1. + np.random.uniform(
low=-self.saturation, high=self.saturation)
self._blend(alpha, img, img_gray[:, :, None])
return img
def apply_contrast(self, img, img_gray):
alpha = 1. + np.random.uniform(low=-self.contrast, high=self.contrast)
img_mean = img_gray.mean()
self._blend(alpha, img, img_mean)
return img
def apply_brightness(self, img, img_gray):
alpha = 1 + np.random.uniform(
low=-self.brightness, high=self.brightness)
img *= alpha
return img
def _blend(self, alpha, img, img_mean):
img *= alpha
img_mean *= (1 - alpha)
img += img_mean
def apply(self, sample, context=None):
functions = [
self.apply_brightness,
self.apply_contrast,
self.apply_saturation,
]
img = sample['image']
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
distortions = np.random.permutation(functions)
for func in distortions:
img = func(img, img_gray)
sample['image'] = img
if 'pre_image' in sample:
pre_img = sample['pre_image']
pre_img_gray = cv2.cvtColor(pre_img, cv2.COLOR_BGR2GRAY)
pre_distortions = np.random.permutation(functions)
for func in pre_distortions:
pre_img = func(pre_img, pre_img_gray)
sample['pre_image'] = pre_img
return sample
@register_op
class Mosaic(BaseOperator):
""" Mosaic operator for image and gt_bboxes
The code is based on https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/data/datasets/mosaicdetection.py
1. get mosaic coords
2. clip bbox and get mosaic_labels
3. random_affine augment
    4. Mixup augment as copypaste (optional), not used in tiny/nano
Args:
prob (float): probability of using Mosaic, 1.0 as default
input_dim (list[int]): input shape
degrees (list[2]): the rotate range to apply, transform range is [min, max]
translate (list[2]): the translate range to apply, transform range is [min, max]
scale (list[2]): the scale range to apply, transform range is [min, max]
shear (list[2]): the shear range to apply, transform range is [min, max]
enable_mixup (bool): whether to enable Mixup or not
mixup_prob (float): probability of using Mixup, 1.0 as default
mixup_scale (list[int]): scale range of Mixup
remove_outside_box (bool): whether remove outside boxes, False as
default in COCO dataset, True in MOT dataset
"""
def __init__(self,
prob=1.0,
input_dim=[640, 640],
degrees=[-10, 10],
translate=[-0.1, 0.1],
scale=[0.1, 2],
shear=[-2, 2],
enable_mixup=True,
mixup_prob=1.0,
mixup_scale=[0.5, 1.5],
remove_outside_box=False):
super(Mosaic, self).__init__()
self.prob = prob
if isinstance(input_dim, Integral):
input_dim = [input_dim, input_dim]
self.input_dim = input_dim
self.degrees = degrees
self.translate = translate
self.scale = scale
self.shear = shear
self.enable_mixup = enable_mixup
self.mixup_prob = mixup_prob
self.mixup_scale = mixup_scale
self.remove_outside_box = remove_outside_box
def get_mosaic_coords(self, mosaic_idx, xc, yc, w, h, input_h, input_w):
# (x1, y1, x2, y2) means coords in large image,
# small_coords means coords in small image in mosaic aug.
if mosaic_idx == 0:
# top left
x1, y1, x2, y2 = max(xc - w, 0), max(yc - h, 0), xc, yc
small_coords = w - (x2 - x1), h - (y2 - y1), w, h
elif mosaic_idx == 1:
# top right
x1, y1, x2, y2 = xc, max(yc - h, 0), min(xc + w, input_w * 2), yc
small_coords = 0, h - (y2 - y1), min(w, x2 - x1), h
elif mosaic_idx == 2:
# bottom left
x1, y1, x2, y2 = max(xc - w, 0), yc, xc, min(input_h * 2, yc + h)
small_coords = w - (x2 - x1), 0, w, min(y2 - y1, h)
elif mosaic_idx == 3:
# bottom right
x1, y1, x2, y2 = xc, yc, min(xc + w, input_w * 2), min(input_h * 2,
yc + h)
small_coords = 0, 0, min(w, x2 - x1), min(y2 - y1, h)
return (x1, y1, x2, y2), small_coords
def random_affine_augment(self,
img,
labels=[],
input_dim=[640, 640],
degrees=[-10, 10],
scales=[0.1, 2],
shears=[-2, 2],
translates=[-0.1, 0.1]):
# random rotation and scale
degree = random.uniform(degrees[0], degrees[1])
scale = random.uniform(scales[0], scales[1])
assert scale > 0, "Argument scale should be positive."
R = cv2.getRotationMatrix2D(angle=degree, center=(0, 0), scale=scale)
M = np.ones([2, 3])
# random shear
shear = random.uniform(shears[0], shears[1])
shear_x = math.tan(shear * math.pi / 180)
shear_y = math.tan(shear * math.pi / 180)
M[0] = R[0] + shear_y * R[1]
M[1] = R[1] + shear_x * R[0]
# random translation
translate = random.uniform(translates[0], translates[1])
translation_x = translate * input_dim[0]
translation_y = translate * input_dim[1]
M[0, 2] = translation_x
M[1, 2] = translation_y
# warpAffine
img = cv2.warpAffine(
img, M, dsize=tuple(input_dim), borderValue=(114, 114, 114))
num_gts = len(labels)
if num_gts > 0:
# warp corner points
corner_points = np.ones((4 * num_gts, 3))
corner_points[:, :2] = labels[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
4 * num_gts, 2) # x1y1, x2y2, x1y2, x2y1
# apply affine transform
            corner_points = corner_points @ M.T
corner_points = corner_points.reshape(num_gts, 8)
# create new boxes
corner_xs = corner_points[:, 0::2]
corner_ys = corner_points[:, 1::2]
new_bboxes = np.concatenate((corner_xs.min(1), corner_ys.min(1),
corner_xs.max(1), corner_ys.max(1)))
new_bboxes = new_bboxes.reshape(4, num_gts).T
# clip boxes
new_bboxes[:, 0::2] = np.clip(new_bboxes[:, 0::2], 0, input_dim[0])
new_bboxes[:, 1::2] = np.clip(new_bboxes[:, 1::2], 0, input_dim[1])
labels[:, :4] = new_bboxes
return img, labels
def __call__(self, sample, context=None):
if not isinstance(sample, Sequence):
return sample
assert len(
sample) == 5, "Mosaic needs 5 samples, 4 for mosaic and 1 for mixup."
if np.random.uniform(0., 1.) > self.prob:
return sample[0]
mosaic_gt_bbox, mosaic_gt_class, mosaic_is_crowd, mosaic_difficult = [], [], [], []
input_h, input_w = self.input_dim
yc = int(random.uniform(0.5 * input_h, 1.5 * input_h))
xc = int(random.uniform(0.5 * input_w, 1.5 * input_w))
mosaic_img = np.full((input_h * 2, input_w * 2, 3), 114, dtype=np.uint8)
# 1. get mosaic coords
for mosaic_idx, sp in enumerate(sample[:4]):
img = sp['image']
gt_bbox = sp['gt_bbox']
h0, w0 = img.shape[:2]
scale = min(1. * input_h / h0, 1. * input_w / w0)
img = cv2.resize(
img, (int(w0 * scale), int(h0 * scale)),
interpolation=cv2.INTER_LINEAR)
(h, w, c) = img.shape[:3]
# suffix l means large image, while s means small image in mosaic aug.
(l_x1, l_y1, l_x2, l_y2), (
s_x1, s_y1, s_x2, s_y2) = self.get_mosaic_coords(
mosaic_idx, xc, yc, w, h, input_h, input_w)
mosaic_img[l_y1:l_y2, l_x1:l_x2] = img[s_y1:s_y2, s_x1:s_x2]
padw, padh = l_x1 - s_x1, l_y1 - s_y1
# Normalized xywh to pixel xyxy format
_gt_bbox = gt_bbox.copy()
if len(gt_bbox) > 0:
_gt_bbox[:, 0] = scale * gt_bbox[:, 0] + padw
_gt_bbox[:, 1] = scale * gt_bbox[:, 1] + padh
_gt_bbox[:, 2] = scale * gt_bbox[:, 2] + padw
_gt_bbox[:, 3] = scale * gt_bbox[:, 3] + padh
mosaic_gt_bbox.append(_gt_bbox)
mosaic_gt_class.append(sp['gt_class'])
if 'is_crowd' in sp:
mosaic_is_crowd.append(sp['is_crowd'])
if 'difficult' in sp:
mosaic_difficult.append(sp['difficult'])
# 2. clip bbox and get mosaic_labels([gt_bbox, gt_class, is_crowd])
if len(mosaic_gt_bbox):
mosaic_gt_bbox = np.concatenate(mosaic_gt_bbox, 0)
mosaic_gt_class = np.concatenate(mosaic_gt_class, 0)
if mosaic_is_crowd:
mosaic_is_crowd = np.concatenate(mosaic_is_crowd, 0)
mosaic_labels = np.concatenate([
mosaic_gt_bbox,
mosaic_gt_class.astype(mosaic_gt_bbox.dtype),
mosaic_is_crowd.astype(mosaic_gt_bbox.dtype)
], 1)
elif mosaic_difficult:
mosaic_difficult = np.concatenate(mosaic_difficult, 0)
mosaic_labels = np.concatenate([
mosaic_gt_bbox,
mosaic_gt_class.astype(mosaic_gt_bbox.dtype),
mosaic_difficult.astype(mosaic_gt_bbox.dtype)
], 1)
else:
mosaic_labels = np.concatenate([
mosaic_gt_bbox, mosaic_gt_class.astype(mosaic_gt_bbox.dtype)
], 1)
if self.remove_outside_box:
# for MOT dataset
flag1 = mosaic_gt_bbox[:, 0] < 2 * input_w
flag2 = mosaic_gt_bbox[:, 2] > 0
flag3 = mosaic_gt_bbox[:, 1] < 2 * input_h
flag4 = mosaic_gt_bbox[:, 3] > 0
flag_all = flag1 * flag2 * flag3 * flag4
mosaic_labels = mosaic_labels[flag_all]
else:
mosaic_labels[:, 0] = np.clip(mosaic_labels[:, 0], 0,
2 * input_w)
mosaic_labels[:, 1] = np.clip(mosaic_labels[:, 1], 0,
2 * input_h)
mosaic_labels[:, 2] = np.clip(mosaic_labels[:, 2], 0,
2 * input_w)
mosaic_labels[:, 3] = np.clip(mosaic_labels[:, 3], 0,
2 * input_h)
else:
mosaic_labels = np.zeros((1, 6))
# 3. random_affine augment
mosaic_img, mosaic_labels = self.random_affine_augment(
mosaic_img,
mosaic_labels,
input_dim=self.input_dim,
degrees=self.degrees,
translates=self.translate,
scales=self.scale,
shears=self.shear)
# 4. Mixup augment as copypaste, https://arxiv.org/abs/2012.07177
        # optional, not used (enable_mixup=False) in tiny/nano
if (self.enable_mixup and not len(mosaic_labels) == 0 and
random.random() < self.mixup_prob):
sample_mixup = sample[4]
mixup_img = sample_mixup['image']
if 'is_crowd' in sample_mixup:
cp_labels = np.concatenate([
sample_mixup['gt_bbox'],
sample_mixup['gt_class'].astype(mosaic_labels.dtype),
sample_mixup['is_crowd'].astype(mosaic_labels.dtype)
], 1)
elif 'difficult' in sample_mixup:
cp_labels = np.concatenate([
sample_mixup['gt_bbox'],
sample_mixup['gt_class'].astype(mosaic_labels.dtype),
sample_mixup['difficult'].astype(mosaic_labels.dtype)
], 1)
else:
cp_labels = np.concatenate([
sample_mixup['gt_bbox'],
sample_mixup['gt_class'].astype(mosaic_labels.dtype)
], 1)
mosaic_img, mosaic_labels = self.mixup_augment(
mosaic_img, mosaic_labels, self.input_dim, cp_labels, mixup_img)
sample0 = sample[0]
sample0['image'] = mosaic_img.astype(np.uint8) # can not be float32
sample0['h'] = float(mosaic_img.shape[0])
sample0['w'] = float(mosaic_img.shape[1])
sample0['im_shape'][0] = sample0['h']
sample0['im_shape'][1] = sample0['w']
sample0['gt_bbox'] = mosaic_labels[:, :4].astype(np.float32)
sample0['gt_class'] = mosaic_labels[:, 4:5].astype(np.float32)
if 'is_crowd' in sample[0]:
sample0['is_crowd'] = mosaic_labels[:, 5:6].astype(np.float32)
if 'difficult' in sample[0]:
sample0['difficult'] = mosaic_labels[:, 5:6].astype(np.float32)
return sample0
def mixup_augment(self, origin_img, origin_labels, input_dim, cp_labels,
img):
jit_factor = random.uniform(*self.mixup_scale)
FLIP = random.uniform(0, 1) > 0.5
if len(img.shape) == 3:
cp_img = np.ones(
(input_dim[0], input_dim[1], 3), dtype=np.uint8) * 114
else:
cp_img = np.ones(input_dim, dtype=np.uint8) * 114
cp_scale_ratio = min(input_dim[0] / img.shape[0],
input_dim[1] / img.shape[1])
resized_img = cv2.resize(
img, (int(img.shape[1] * cp_scale_ratio),
int(img.shape[0] * cp_scale_ratio)),
interpolation=cv2.INTER_LINEAR)
cp_img[:int(img.shape[0] * cp_scale_ratio), :int(img.shape[
1] * cp_scale_ratio)] = resized_img
cp_img = cv2.resize(cp_img, (int(cp_img.shape[1] * jit_factor),
int(cp_img.shape[0] * jit_factor)))
cp_scale_ratio *= jit_factor
if FLIP:
cp_img = cp_img[:, ::-1, :]
origin_h, origin_w = cp_img.shape[:2]
target_h, target_w = origin_img.shape[:2]
padded_img = np.zeros(
(max(origin_h, target_h), max(origin_w, target_w), 3),
dtype=np.uint8)
padded_img[:origin_h, :origin_w] = cp_img
x_offset, y_offset = 0, 0
if padded_img.shape[0] > target_h:
y_offset = random.randint(0, padded_img.shape[0] - target_h - 1)
if padded_img.shape[1] > target_w:
x_offset = random.randint(0, padded_img.shape[1] - target_w - 1)
padded_cropped_img = padded_img[y_offset:y_offset + target_h, x_offset:
x_offset + target_w]
# adjust boxes
cp_bboxes_origin_np = cp_labels[:, :4].copy()
cp_bboxes_origin_np[:, 0::2] = np.clip(cp_bboxes_origin_np[:, 0::2] *
cp_scale_ratio, 0, origin_w)
cp_bboxes_origin_np[:, 1::2] = np.clip(cp_bboxes_origin_np[:, 1::2] *
cp_scale_ratio, 0, origin_h)
if FLIP:
cp_bboxes_origin_np[:, 0::2] = (
origin_w - cp_bboxes_origin_np[:, 0::2][:, ::-1])
cp_bboxes_transformed_np = cp_bboxes_origin_np.copy()
if self.remove_outside_box:
# for MOT dataset
cp_bboxes_transformed_np[:, 0::2] -= x_offset
cp_bboxes_transformed_np[:, 1::2] -= y_offset
else:
cp_bboxes_transformed_np[:, 0::2] = np.clip(
cp_bboxes_transformed_np[:, 0::2] - x_offset, 0, target_w)
cp_bboxes_transformed_np[:, 1::2] = np.clip(
cp_bboxes_transformed_np[:, 1::2] - y_offset, 0, target_h)
cls_labels = cp_labels[:, 4:5].copy()
box_labels = cp_bboxes_transformed_np
if cp_labels.shape[-1] == 6:
crd_labels = cp_labels[:, 5:6].copy()
labels = np.hstack((box_labels, cls_labels, crd_labels))
else:
labels = np.hstack((box_labels, cls_labels))
if self.remove_outside_box:
labels = labels[labels[:, 0] < target_w]
labels = labels[labels[:, 2] > 0]
labels = labels[labels[:, 1] < target_h]
labels = labels[labels[:, 3] > 0]
origin_labels = np.vstack((origin_labels, labels))
origin_img = origin_img.astype(np.float32)
origin_img = 0.5 * origin_img + 0.5 * padded_cropped_img.astype(
np.float32)
return origin_img.astype(np.uint8), origin_labels
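# --- Illustrative sketch (not part of the original module): driving the
# Mosaic op directly with five small synthetic samples (four mosaic tiles
# plus the mixup candidate required by the length check). Mixup is disabled
# to keep the sketch short; all shapes and boxes are made up.
def _example_mosaic():
    import numpy as np

    def _make_sample(idx):
        return {
            'image': np.full((320, 320, 3), idx * 40, dtype=np.uint8),
            'gt_bbox': np.array([[10., 10., 100., 100.]], dtype=np.float32),
            'gt_class': np.array([[idx]], dtype=np.int32),
            'im_shape': np.array([320., 320.], dtype=np.float32),
        }

    samples = [_make_sample(i) for i in range(5)]
    mosaic = Mosaic(prob=1.0, input_dim=[320, 320], enable_mixup=False)
    out = mosaic(samples)
    # out['image'] is the input_dim-sized mosaic after the random affine step;
    # out['gt_bbox'] / out['gt_class'] hold the merged, clipped boxes.
    return out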
@register_op
class PadResize(BaseOperator):
""" PadResize for image and gt_bbbox
Args:
target_size (list[int]): input shape
fill_value (float): pixel value of padded image
"""
def __init__(self, target_size, fill_value=114):
super(PadResize, self).__init__()
if isinstance(target_size, Integral):
target_size = [target_size, target_size]
self.target_size = target_size
self.fill_value = fill_value
def _resize(self, img, bboxes, labels):
ratio = min(self.target_size[0] / img.shape[0],
self.target_size[1] / img.shape[1])
w, h = int(img.shape[1] * ratio), int(img.shape[0] * ratio)
resized_img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
if len(bboxes) > 0:
bboxes *= ratio
mask = np.minimum(bboxes[:, 2] - bboxes[:, 0],
bboxes[:, 3] - bboxes[:, 1]) > 1
bboxes = bboxes[mask]
labels = labels[mask]
return resized_img, bboxes, labels
def _pad(self, img):
h, w, _ = img.shape
if h == self.target_size[0] and w == self.target_size[1]:
return img
padded_img = np.full(
(self.target_size[0], self.target_size[1], 3),
self.fill_value,
dtype=np.uint8)
padded_img[:h, :w] = img
return padded_img
def apply(self, sample, context=None):
image = sample['image']
bboxes = sample['gt_bbox']
labels = sample['gt_class']
image, bboxes, labels = self._resize(image, bboxes, labels)
sample['image'] = self._pad(image).astype(np.float32)
sample['gt_bbox'] = bboxes
sample['gt_class'] = labels
return sample
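# --- Worked example (illustrative, not part of the original module): for a
# 480x640 (HxW) image and target_size=[320, 320], the resize ratio is
# min(320/480, 320/640) = 0.5, so the image becomes 240x320 and is then
# padded with fill_value pixels to 320x320. Boxes are scaled by the same
# ratio and near-degenerate boxes (shorter side <= 1 pixel) are dropped.
def _example_pad_resize():
    import numpy as np
    sample = {
        'image': np.zeros((480, 640, 3), dtype=np.uint8),
        'gt_bbox': np.array([[20., 20., 120., 220.]], dtype=np.float32),
        'gt_class': np.array([[3]], dtype=np.int32),
    }
    out = PadResize(target_size=[320, 320])(sample)
    # out['image'].shape == (320, 320, 3); the box becomes [10, 10, 60, 110].
    return out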
@register_op
class RandomShift(BaseOperator):
"""
Randomly shift image
Args:
prob (float): probability to do random shift.
max_shift (int): max shift pixels
filter_thr (int): filter gt bboxes if one side is smaller than this
"""
def __init__(self, prob=0.5, max_shift=32, filter_thr=1):
super(RandomShift, self).__init__()
self.prob = prob
self.max_shift = max_shift
self.filter_thr = filter_thr
def calc_shift_coor(self, im_h, im_w, shift_h, shift_w):
return [
max(0, shift_w), max(0, shift_h), min(im_w, im_w + shift_w),
min(im_h, im_h + shift_h)
]
def apply(self, sample, context=None):
if random.random() > self.prob:
return sample
im = sample['image']
gt_bbox = sample['gt_bbox']
gt_class = sample['gt_class']
im_h, im_w = im.shape[:2]
shift_h = random.randint(-self.max_shift, self.max_shift)
shift_w = random.randint(-self.max_shift, self.max_shift)
gt_bbox[:, 0::2] += shift_w
gt_bbox[:, 1::2] += shift_h
gt_bbox[:, 0::2] = np.clip(gt_bbox[:, 0::2], 0, im_w)
gt_bbox[:, 1::2] = np.clip(gt_bbox[:, 1::2], 0, im_h)
        gt_bbox_w = gt_bbox[:, 2] - gt_bbox[:, 0]
        gt_bbox_h = gt_bbox[:, 3] - gt_bbox[:, 1]
        keep = (gt_bbox_w > self.filter_thr) & (gt_bbox_h > self.filter_thr)
if not keep.any():
return sample
gt_bbox = gt_bbox[keep]
gt_class = gt_class[keep]
# shift image
coor_new = self.calc_shift_coor(im_h, im_w, shift_h, shift_w)
# shift frame to the opposite direction
coor_old = self.calc_shift_coor(im_h, im_w, -shift_h, -shift_w)
canvas = np.zeros_like(im)
canvas[coor_new[1]:coor_new[3], coor_new[0]:coor_new[2]] \
= im[coor_old[1]:coor_old[3], coor_old[0]:coor_old[2]]
sample['image'] = canvas
sample['gt_bbox'] = gt_bbox
sample['gt_class'] = gt_class
return sample
@register_op
class StrongAugImage(BaseOperator):
def __init__(self, transforms):
super(StrongAugImage, self).__init__()
self.transforms = Compose(transforms)
def apply(self, sample, context=None):
im = sample
im['image'] = sample['image'].astype('uint8')
results = self.transforms(im)
sample['image'] = results['image'].astype('uint8')
return sample
@register_op
class RandomColorJitter(BaseOperator):
def __init__(self,
prob=0.8,
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.1):
super(RandomColorJitter, self).__init__()
self.prob = prob
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def apply(self, sample, context=None):
if np.random.uniform(0, 1) < self.prob:
from paddle.vision.transforms import ColorJitter
transform = ColorJitter(self.brightness, self.contrast,
self.saturation, self.hue)
sample['image'] = transform(sample['image'].astype(np.uint8))
sample['image'] = sample['image'].astype(np.float32)
return sample
@register_op
class RandomGrayscale(BaseOperator):
def __init__(self, prob=0.2):
super(RandomGrayscale, self).__init__()
self.prob = prob
def apply(self, sample, context=None):
if np.random.uniform(0, 1) < self.prob:
from paddle.vision.transforms import Grayscale
transform = Grayscale(num_output_channels=3)
sample['image'] = transform(sample['image'])
return sample
@register_op
class RandomGaussianBlur(BaseOperator):
def __init__(self, prob=0.5, sigma=[0.1, 2.0]):
super(RandomGaussianBlur, self).__init__()
self.prob = prob
self.sigma = sigma
def apply(self, sample, context=None):
if np.random.uniform(0, 1) < self.prob:
sigma = np.random.uniform(self.sigma[0], self.sigma[1])
im = cv2.GaussianBlur(sample['image'], (23, 23), sigma)
sample['image'] = im
return sample
@register_op
class RandomErasing(BaseOperator):
def __init__(self,
prob=0.5,
scale=(0.02, 0.33),
ratio=(0.3, 3.3),
value=0,
inplace=False):
super(RandomErasing, self).__init__()
assert isinstance(scale,
(tuple, list)), "scale should be a tuple or list"
assert (scale[0] >= 0 and scale[1] <= 1 and scale[0] <= scale[1]
), "scale should be of kind (min, max) and in range [0, 1]"
assert isinstance(ratio,
(tuple, list)), "ratio should be a tuple or list"
assert (ratio[0] >= 0 and
ratio[0] <= ratio[1]), "ratio should be of kind (min, max)"
assert isinstance(
value, (Number, str, tuple,
list)), "value should be a number, tuple, list or str"
if isinstance(value, str) and value != "random":
raise ValueError("value must be 'random' when type is str")
self.prob = prob
self.scale = scale
self.ratio = ratio
self.value = value
self.inplace = inplace
def _erase(self, img, i, j, h, w, v, inplace=False):
if not inplace:
img = img.copy()
img[i:i + h, j:j + w, ...] = v
return img
def _get_param(self, img, scale, ratio, value):
shape = np.asarray(img).astype(np.uint8).shape
h, w, c = shape[-3], shape[-2], shape[-1]
img_area = h * w
log_ratio = np.log(ratio)
for _ in range(1):
erase_area = np.random.uniform(*scale) * img_area
aspect_ratio = np.exp(np.random.uniform(*log_ratio))
erase_h = int(round(np.sqrt(erase_area * aspect_ratio)))
erase_w = int(round(np.sqrt(erase_area / aspect_ratio)))
if erase_h >= h or erase_w >= w:
continue
if value is None:
v = np.random.normal(size=[erase_h, erase_w, c]) * 255
else:
v = np.array(value)[None, None, :]
top = np.random.randint(0, h - erase_h + 1)
left = np.random.randint(0, w - erase_w + 1)
return top, left, erase_h, erase_w, v
return 0, 0, h, w, img
def apply(self, sample, context=None):
if random.random() < self.prob:
if isinstance(self.value, Number):
value = [self.value]
elif isinstance(self.value, str):
value = None
else:
value = self.value
if value is not None and not (len(value) == 1 or len(value) == 3):
raise ValueError(
"Value should be a single number or a sequence with length equals to image's channel."
)
im = sample['image']
top, left, erase_h, erase_w, v = self._get_param(im, self.scale,
self.ratio, value)
im = self._erase(im, top, left, erase_h, erase_w, v, self.inplace)
sample['image'] = im
return sample
@register_op
class RandomErasingCrop(BaseOperator):
def __init__(self):
super(RandomErasingCrop, self).__init__()
self.transform1 = RandomErasing(
prob=0.7, scale=(0.05, 0.2), ratio=(0.3, 3.3), value="random")
self.transform2 = RandomErasing(
prob=0.5, scale=(0.05, 0.2), ratio=(0.1, 6), value="random")
self.transform3 = RandomErasing(
prob=0.3, scale=(0.05, 0.2), ratio=(0.05, 8), value="random")
def apply(self, sample, context=None):
sample = self.transform1(sample)
sample = self.transform2(sample)
sample = self.transform3(sample)
return sample
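# --- Illustrative usage sketch; not part of the original operators.py ---
# The operators above share one contract: `apply(sample)` takes a dict holding an
# 'image' (HWC ndarray) plus optional 'gt_bbox' / 'gt_class' arrays, transforms it,
# and returns it. A minimal sketch of chaining a few of them on a synthetic sample;
# the shapes and values below are made up purely for the demo.
def _demo_transform_pipeline():
    sample = {
        'image': np.random.randint(0, 256, (320, 320, 3), dtype=np.uint8),
        'gt_bbox': np.array([[30., 40., 200., 180.]], dtype=np.float32),
        'gt_class': np.array([[0]], dtype=np.int32),
    }
    ops = [
        RandomShift(prob=1.0, max_shift=16),
        RandomGaussianBlur(prob=1.0, sigma=[0.1, 2.0]),
        RandomErasingCrop(),
    ]
    for op in ops:
        sample = op.apply(sample)
    return sample['image'].shape, sample['gt_bbox']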
| PaddleDetection/ppdet/data/transform/operators.py/0 | {
"file_path": "PaddleDetection/ppdet/data/transform/operators.py",
"repo_id": "PaddleDetection",
"token_count": 84219
} | 66 |
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The code is based on
// https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/csrc/box_iou_rotated/
#include "paddle/extension.h"
#include "rbox_iou_utils.h"
template <typename T>
void rbox_iou_cpu_kernel(const int rbox1_num, const int rbox2_num,
const T *rbox1_data_ptr, const T *rbox2_data_ptr,
T *output_data_ptr) {
int i, j;
for (i = 0; i < rbox1_num; i++) {
for (j = 0; j < rbox2_num; j++) {
int offset = i * rbox2_num + j;
output_data_ptr[offset] =
rbox_iou_single<T>(rbox1_data_ptr + i * 5, rbox2_data_ptr + j * 5);
}
}
}
#define CHECK_INPUT_CPU(x) \
PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
std::vector<paddle::Tensor> RboxIouCPUForward(const paddle::Tensor &rbox1,
const paddle::Tensor &rbox2) {
CHECK_INPUT_CPU(rbox1);
CHECK_INPUT_CPU(rbox2);
auto rbox1_num = rbox1.shape()[0];
auto rbox2_num = rbox2.shape()[0];
auto output =
paddle::empty({rbox1_num, rbox2_num}, rbox1.dtype(), paddle::CPUPlace());
PD_DISPATCH_FLOATING_TYPES(rbox1.type(), "rbox_iou_cpu_kernel", ([&] {
rbox_iou_cpu_kernel<data_t>(
rbox1_num, rbox2_num, rbox1.data<data_t>(),
rbox2.data<data_t>(), output.data<data_t>());
}));
return {output};
}
#ifdef PADDLE_WITH_CUDA
std::vector<paddle::Tensor> RboxIouCUDAForward(const paddle::Tensor &rbox1,
const paddle::Tensor &rbox2);
#endif
#define CHECK_INPUT_SAME(x1, x2) \
  PD_CHECK(x1.place() == x2.place(), "inputs must be on the same place.")
std::vector<paddle::Tensor> RboxIouForward(const paddle::Tensor &rbox1,
const paddle::Tensor &rbox2) {
CHECK_INPUT_SAME(rbox1, rbox2);
if (rbox1.is_cpu()) {
return RboxIouCPUForward(rbox1, rbox2);
#ifdef PADDLE_WITH_CUDA
} else if (rbox1.is_gpu()) {
return RboxIouCUDAForward(rbox1, rbox2);
#endif
}
}
std::vector<std::vector<int64_t>>
RboxIouInferShape(std::vector<int64_t> rbox1_shape,
std::vector<int64_t> rbox2_shape) {
return {{rbox1_shape[0], rbox2_shape[0]}};
}
std::vector<paddle::DataType> RboxIouInferDtype(paddle::DataType t1,
paddle::DataType t2) {
return {t1};
}
PD_BUILD_OP(rbox_iou)
.Inputs({"RBox1", "RBox2"})
.Outputs({"Output"})
.SetKernelFn(PD_KERNEL(RboxIouForward))
.SetInferShapeFn(PD_INFER_SHAPE(RboxIouInferShape))
.SetInferDtypeFn(PD_INFER_DTYPE(RboxIouInferDtype));
| PaddleDetection/ppdet/ext_op/csrc/rbox_iou/rbox_iou.cc/0 | {
"file_path": "PaddleDetection/ppdet/ext_op/csrc/rbox_iou/rbox_iou.cc",
"repo_id": "PaddleDetection",
"token_count": 1706
} | 67 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.distributed import ParallelEnv
import os
import json
from collections import defaultdict, OrderedDict
import numpy as np
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['Pose3DEval']
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def mean_per_joint_position_error(pred, gt, has_3d_joints):
"""
Compute mPJPE
"""
gt = gt[has_3d_joints == 1]
gt = gt[:, :, :3]
pred = pred[has_3d_joints == 1]
with paddle.no_grad():
gt_pelvis = (gt[:, 2, :] + gt[:, 3, :]) / 2
gt = gt - gt_pelvis[:, None, :]
pred_pelvis = (pred[:, 2, :] + pred[:, 3, :]) / 2
pred = pred - pred_pelvis[:, None, :]
error = paddle.sqrt(((pred - gt)**2).sum(axis=-1)).mean(axis=-1).numpy()
return error
def compute_similarity_transform(S1, S2):
"""Computes a similarity transform (sR, t) that takes
a set of 3D points S1 (3 x N) closest to a set of 3D points S2,
    where R is a 3x3 rotation matrix, t 3x1 translation, s scale.
    i.e. solves the orthogonal Procrustes problem.
"""
transposed = False
if S1.shape[0] != 3 and S1.shape[0] != 2:
S1 = S1.T
S2 = S2.T
transposed = True
assert (S2.shape[1] == S1.shape[1])
# 1. Remove mean.
mu1 = S1.mean(axis=1, keepdims=True)
mu2 = S2.mean(axis=1, keepdims=True)
X1 = S1 - mu1
X2 = S2 - mu2
# 2. Compute variance of X1 used for scale.
var1 = np.sum(X1**2)
# 3. The outer product of X1 and X2.
K = X1.dot(X2.T)
# 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are
# singular vectors of K.
U, s, Vh = np.linalg.svd(K)
V = Vh.T
# Construct Z that fixes the orientation of R to get det(R)=1.
Z = np.eye(U.shape[0])
Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
# Construct R.
R = V.dot(Z.dot(U.T))
# 5. Recover scale.
scale = np.trace(R.dot(K)) / var1
# 6. Recover translation.
t = mu2 - scale * (R.dot(mu1))
# 7. Error:
S1_hat = scale * R.dot(S1) + t
if transposed:
S1_hat = S1_hat.T
return S1_hat
def compute_similarity_transform_batch(S1, S2):
"""Batched version of compute_similarity_transform."""
S1_hat = np.zeros_like(S1)
for i in range(S1.shape[0]):
S1_hat[i] = compute_similarity_transform(S1[i], S2[i])
return S1_hat
def reconstruction_error(S1, S2, reduction='mean'):
"""Do Procrustes alignment and compute reconstruction error."""
S1_hat = compute_similarity_transform_batch(S1, S2)
re = np.sqrt(((S1_hat - S2)**2).sum(axis=-1)).mean(axis=-1)
if reduction == 'mean':
re = re.mean()
elif reduction == 'sum':
re = re.sum()
return re
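# --- Illustrative sanity check; not part of the original metrics file ---
# compute_similarity_transform() solves the orthogonal Procrustes problem: find a
# scale s, rotation R and translation t so that s*R*S1 + t best matches S2. A quick
# synthetic check (shapes/values invented for the demo): distort a point cloud with a
# known similarity transform and confirm the reconstruction error collapses to ~0.
def _demo_procrustes_alignment():
    rng = np.random.RandomState(0)
    s1 = rng.rand(14, 3)                                    # N x 3 joints
    theta = np.pi / 6
    rot = np.array([[np.cos(theta), -np.sin(theta), 0.],
                    [np.sin(theta), np.cos(theta), 0.],
                    [0., 0., 1.]])
    s2 = 1.7 * s1.dot(rot.T) + np.array([0.3, -0.2, 1.0])   # s * R * S1 + t
    err = reconstruction_error(s1[None, ...], s2[None, ...])
    return err                                              # expected to be ~0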
def all_gather(data):
if paddle.distributed.get_world_size() == 1:
return data
vlist = []
paddle.distributed.all_gather(vlist, data)
data = paddle.concat(vlist, 0)
return data
class Pose3DEval(object):
def __init__(self, output_eval, save_prediction_only=False):
super(Pose3DEval, self).__init__()
self.output_eval = output_eval
self.res_file = os.path.join(output_eval, "pose3d_results.json")
self.save_prediction_only = save_prediction_only
self.reset()
def reset(self):
self.PAmPJPE = AverageMeter()
self.mPJPE = AverageMeter()
self.eval_results = {}
def get_human36m_joints(self, input):
J24_TO_J14 = paddle.to_tensor(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18])
J24_TO_J17 = paddle.to_tensor(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 18, 19])
return paddle.index_select(input, J24_TO_J14, axis=1)
def update(self, inputs, outputs):
gt_3d_joints = all_gather(inputs['joints_3d'].cuda(ParallelEnv()
.local_rank))
has_3d_joints = all_gather(inputs['has_3d_joints'].cuda(ParallelEnv()
.local_rank))
pred_3d_joints = all_gather(outputs['pose3d'])
if gt_3d_joints.shape[1] == 24:
gt_3d_joints = self.get_human36m_joints(gt_3d_joints)
if pred_3d_joints.shape[1] == 24:
pred_3d_joints = self.get_human36m_joints(pred_3d_joints)
mPJPE_val = mean_per_joint_position_error(pred_3d_joints, gt_3d_joints,
has_3d_joints).mean()
PAmPJPE_val = reconstruction_error(
pred_3d_joints.numpy(),
gt_3d_joints[:, :, :3].numpy(),
reduction=None).mean()
count = int(np.sum(has_3d_joints.numpy()))
self.PAmPJPE.update(PAmPJPE_val * 1000., count)
self.mPJPE.update(mPJPE_val * 1000., count)
def accumulate(self):
if self.save_prediction_only:
logger.info(f'The pose3d result is saved to {self.res_file} '
'and do not evaluate the model.')
return
self.eval_results['pose3d'] = [-self.mPJPE.avg, -self.PAmPJPE.avg]
def log(self):
if self.save_prediction_only:
return
stats_names = ['mPJPE', 'PAmPJPE']
num_values = len(stats_names)
print(' '.join(['| {}'.format(name) for name in stats_names]) + ' |')
print('|---' * (num_values + 1) + '|')
print(' '.join([
'| {:.3f}'.format(abs(value))
for value in self.eval_results['pose3d']
]) + ' |')
def get_results(self):
return self.eval_results
| PaddleDetection/ppdet/metrics/pose3d_metrics.py/0 | {
"file_path": "PaddleDetection/ppdet/metrics/pose3d_metrics.py",
"repo_id": "PaddleDetection",
"token_count": 3162
} | 68 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from ppdet.core.workspace import register, create
from .meta_arch import BaseArch
from ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
__all__ = ['DeepSORT']
@register
class DeepSORT(BaseArch):
"""
DeepSORT network, see https://arxiv.org/abs/1703.07402
Args:
detector (object): detector model instance
reid (object): reid model instance
tracker (object): tracker instance
"""
__category__ = 'architecture'
def __init__(self,
detector='YOLOv3',
reid='PCBPyramid',
tracker='DeepSORTTracker'):
super(DeepSORT, self).__init__()
self.detector = detector
self.reid = reid
self.tracker = tracker
@classmethod
def from_config(cls, cfg, *args, **kwargs):
if cfg['detector'] != 'None':
detector = create(cfg['detector'])
else:
detector = None
reid = create(cfg['reid'])
tracker = create(cfg['tracker'])
return {
"detector": detector,
"reid": reid,
"tracker": tracker,
}
def _forward(self):
crops = self.inputs['crops']
outs = {}
outs['embeddings'] = self.reid(crops)
return outs
def get_pred(self):
return self._forward()
| PaddleDetection/ppdet/modeling/architectures/deepsort.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/architectures/deepsort.py",
"repo_id": "PaddleDetection",
"token_count": 843
} | 69 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register, create
from .meta_arch import BaseArch
from .. import layers as L
__all__ = ['METRO_Body']
def orthographic_projection(X, camera):
"""Perform orthographic projection of 3D points X using the camera parameters
Args:
X: size = [B, N, 3]
camera: size = [B, 3]
Returns:
Projected 2D points -- size = [B, N, 2]
"""
camera = camera.reshape((-1, 1, 3))
X_trans = X[:, :, :2] + camera[:, :, 1:]
shape = paddle.shape(X_trans)
X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape)
return X_2d
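# --- Illustrative sketch; not part of the original model file ---
# orthographic_projection() reads camera as [s, tx, ty]: each 3D point is shifted by
# (tx, ty) in the XY plane, scaled by s, and Z is dropped. A tiny hand-checked example
# (values invented for the demo); paddle is already imported above.
def _demo_orthographic_projection():
    pts = paddle.to_tensor([[[1.0, 2.0, 5.0], [0.0, -1.0, 3.0]]])  # [B=1, N=2, 3]
    cam = paddle.to_tensor([[2.0, 0.5, -0.5]])                     # [B=1, 3] -> s, tx, ty
    proj = orthographic_projection(pts, cam)
    # expected: 2 * ([1, 2] + [0.5, -0.5]) = [3, 3] and 2 * ([0, -1] + [0.5, -0.5]) = [1, -3]
    return proj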
@register
class METRO_Body(BaseArch):
__category__ = 'architecture'
__inject__ = ['loss']
def __init__(
self,
num_joints,
backbone='HRNet',
trans_encoder='',
loss='Pose3DLoss', ):
"""
Modified from METRO network, see https://arxiv.org/abs/2012.09760
Args:
backbone (nn.Layer): backbone instance
"""
super(METRO_Body, self).__init__()
self.num_joints = num_joints
self.backbone = backbone
self.loss = loss
self.deploy = False
self.trans_encoder = trans_encoder
self.conv_learn_tokens = paddle.nn.Conv1D(49, num_joints + 10, 1)
self.cam_param_fc = paddle.nn.Linear(3, 2)
@classmethod
def from_config(cls, cfg, *args, **kwargs):
# backbone
backbone = create(cfg['backbone'])
trans_encoder = create(cfg['trans_encoder'])
return {'backbone': backbone, 'trans_encoder': trans_encoder}
def _forward(self):
batch_size = self.inputs['image'].shape[0]
image_feat = self.backbone(self.inputs)
image_feat_flatten = image_feat.reshape((batch_size, 2048, 49))
image_feat_flatten = image_feat_flatten.transpose(perm=(0, 2, 1))
        # apply a conv layer to learn an image token for each 3D joint/vertex position
features = self.conv_learn_tokens(image_feat_flatten) # (B, J, C)
if self.training:
# apply mask vertex/joint modeling
# meta_masks is a tensor of all the masks, randomly generated in dataloader
# we pre-define a [MASK] token, which is a floating-value vector with 0.01s
meta_masks = self.inputs['mjm_mask'].expand((-1, -1, 2048))
constant_tensor = paddle.ones_like(features) * 0.01
features = features * meta_masks + constant_tensor * (1 - meta_masks
)
pred_out = self.trans_encoder(features)
pred_3d_joints = pred_out[:, :self.num_joints, :]
cam_features = pred_out[:, self.num_joints:, :]
# learn camera parameters
pred_2d_joints = self.cam_param_fc(cam_features)
return pred_3d_joints, pred_2d_joints
def get_loss(self):
preds_3d, preds_2d = self._forward()
loss = self.loss(preds_3d, preds_2d, self.inputs)
output = {'loss': loss}
return output
def get_pred(self):
preds_3d, preds_2d = self._forward()
outputs = {'pose3d': preds_3d, 'pose2d': preds_2d}
return outputs
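# --- Illustrative sketch; not part of the original file ---
# The masked vertex/joint modeling in _forward() replaces masked token features with a
# constant 0.01 "[MASK]" vector via features * mask + 0.01 * (1 - mask). A tiny paddle
# check of that blending with invented shapes; paddle is already imported above.
def _demo_masked_token_blending():
    features = paddle.ones([1, 4, 8]) * 5.0                        # [B, tokens, C]
    mjm_mask = paddle.to_tensor([[[1.], [0.], [1.], [0.]]])        # 1 = keep, 0 = mask out
    mask = mjm_mask.expand([-1, -1, 8])
    blended = features * mask + paddle.ones_like(features) * 0.01 * (1 - mask)
    # tokens 0 and 2 keep their features; tokens 1 and 3 become the 0.01 [MASK] vector
    return blended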
| PaddleDetection/ppdet/modeling/architectures/pose3d_metro.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/architectures/pose3d_metro.py",
"repo_id": "PaddleDetection",
"token_count": 1723
} | 70 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.rbox_utils import box2corners, check_points_in_polys, paddle_gather
__all__ = ['FCOSRAssigner']
EPS = 1e-9
@register
class FCOSRAssigner(nn.Layer):
""" FCOSR Assigner, refer to https://arxiv.org/abs/2111.10780 for details
1. compute normalized gaussian distribution score and refined gaussian distribution score
    2. following ellipse center sampling, sample the points whose normalized gaussian distribution score is greater than the threshold
    3. following multi-level sampling, assign a ground truth to a feature map only when two conditions hold:
        i). the ratio between the short edge of the target and the stride of the feature map is less than 2.
        ii). the long edge of the minimum bounding rectangle of the target is larger than the acceptance range of the feature map
    4. following fuzzy sample label assignment, the points satisfying 2 and 3 are assigned to the ground truth according to the gaussian distribution score
"""
__shared__ = ['num_classes']
def __init__(self,
num_classes=80,
factor=12,
threshold=0.23,
boundary=[[-1, 128], [128, 320], [320, 10000]],
score_type='iou'):
super(FCOSRAssigner, self).__init__()
self.num_classes = num_classes
self.factor = factor
self.threshold = threshold
self.boundary = [
paddle.to_tensor(
l, dtype=paddle.float32).reshape([1, 1, 2]) for l in boundary
]
self.score_type = score_type
def get_gaussian_distribution_score(self, points, gt_rboxes, gt_polys):
# projecting points to coordinate system defined by each rbox
# [B, N, 4, 2] -> 4 * [B, N, 1, 2]
a, b, c, d = gt_polys.split(4, axis=2)
# [1, L, 2] -> [1, 1, L, 2]
points = points.unsqueeze(0)
ab = b - a
ad = d - a
# [B, N, 5] -> [B, N, 2], [B, N, 2], [B, N, 1]
xy, wh, angle = gt_rboxes.split([2, 2, 1], axis=-1)
# [B, N, 2] -> [B, N, 1, 2]
xy = xy.unsqueeze(2)
# vector of points to center [B, N, L, 2]
vec = points - xy
# <ab, vec> = |ab| * |vec| * cos(theta) [B, N, L]
vec_dot_ab = paddle.sum(vec * ab, axis=-1)
# <ad, vec> = |ad| * |vec| * cos(theta) [B, N, L]
vec_dot_ad = paddle.sum(vec * ad, axis=-1)
# norm_ab [B, N, L]
norm_ab = paddle.sum(ab * ab, axis=-1).sqrt()
# norm_ad [B, N, L]
norm_ad = paddle.sum(ad * ad, axis=-1).sqrt()
# min(h, w), [B, N, 1]
min_edge = paddle.min(wh, axis=-1, keepdim=True)
# delta_x, delta_y [B, N, L]
delta_x = vec_dot_ab.pow(2) / (norm_ab.pow(3) * min_edge + EPS)
delta_y = vec_dot_ad.pow(2) / (norm_ad.pow(3) * min_edge + EPS)
# score [B, N, L]
norm_score = paddle.exp(-0.5 * self.factor * (delta_x + delta_y))
# simplified calculation
sigma = min_edge / self.factor
refined_score = norm_score / (2 * np.pi * sigma + EPS)
return norm_score, refined_score
def get_rotated_inside_mask(self, points, gt_polys, scores):
inside_mask = check_points_in_polys(points, gt_polys)
center_mask = scores >= self.threshold
return (inside_mask & center_mask).cast(paddle.float32)
def get_inside_range_mask(self, points, gt_bboxes, gt_rboxes, stride_tensor,
regress_range):
# [1, L, 2] -> [1, 1, L, 2]
points = points.unsqueeze(0)
# [B, n, 4] -> [B, n, 1, 4]
x1y1, x2y2 = gt_bboxes.unsqueeze(2).split(2, axis=-1)
# [B, n, L, 2]
lt = points - x1y1
rb = x2y2 - points
# [B, n, L, 4]
ltrb = paddle.concat([lt, rb], axis=-1)
# [B, n, L, 4] -> [B, n, L]
inside_mask = paddle.min(ltrb, axis=-1) > EPS
# regress_range [1, L, 2] -> [1, 1, L, 2]
regress_range = regress_range.unsqueeze(0)
# stride_tensor [1, L, 1] -> [1, 1, L]
stride_tensor = stride_tensor.transpose((0, 2, 1))
# fcos range
# [B, n, L, 4] -> [B, n, L]
ltrb_max = paddle.max(ltrb, axis=-1)
# [1, 1, L, 2] -> [1, 1, L]
low, high = regress_range[..., 0], regress_range[..., 1]
# [B, n, L]
regress_mask = (ltrb_max >= low) & (ltrb_max <= high)
# mask for rotated
# [B, n, 1]
min_edge = paddle.min(gt_rboxes[..., 2:4], axis=-1, keepdim=True)
# [B, n , L]
rotated_mask = ((min_edge / stride_tensor) < 2.0) & (ltrb_max > high)
mask = inside_mask & (regress_mask | rotated_mask)
return mask.cast(paddle.float32)
@paddle.no_grad()
def forward(self,
anchor_points,
stride_tensor,
num_anchors_list,
gt_labels,
gt_bboxes,
gt_rboxes,
pad_gt_mask,
bg_index,
pred_rboxes=None):
r"""
Args:
anchor_points (Tensor, float32): pre-defined anchor points, shape(1, L, 2),
"x, y" format
stride_tensor (Tensor, float32): stride tensor, shape (1, L, 1)
num_anchors_list (List): num of anchors in each level
gt_labels (Tensor, int64|int32): Label of gt_bboxes, shape(B, n, 1)
gt_bboxes (Tensor, float32): Ground truth bboxes, shape(B, n, 4)
gt_rboxes (Tensor, float32): Ground truth bboxes, shape(B, n, 5)
pad_gt_mask (Tensor, float32): 1 means bbox, 0 means no bbox, shape(B, n, 1)
bg_index (int): background index
pred_rboxes (Tensor, float32, optional): predicted bounding boxes, shape(B, L, 5)
Returns:
assigned_labels (Tensor): (B, L)
assigned_rboxes (Tensor): (B, L, 5)
assigned_scores (Tensor): (B, L, C), if pred_rboxes is not None, then output ious
"""
_, num_anchors, _ = anchor_points.shape
batch_size, num_max_boxes, _ = gt_rboxes.shape
if num_max_boxes == 0:
assigned_labels = paddle.full(
[batch_size, num_anchors], bg_index, dtype=gt_labels.dtype)
assigned_rboxes = paddle.zeros([batch_size, num_anchors, 5])
assigned_scores = paddle.zeros(
[batch_size, num_anchors, self.num_classes])
return assigned_labels, assigned_rboxes, assigned_scores
# get normalized gaussian distribution score and refined distribution score
gt_polys = box2corners(gt_rboxes)
score, refined_score = self.get_gaussian_distribution_score(
anchor_points, gt_rboxes, gt_polys)
inside_mask = self.get_rotated_inside_mask(anchor_points, gt_polys,
score)
regress_ranges = []
for num, bound in zip(num_anchors_list, self.boundary):
regress_ranges.append(bound.tile((1, num, 1)))
regress_ranges = paddle.concat(regress_ranges, axis=1)
regress_mask = self.get_inside_range_mask(
anchor_points, gt_bboxes, gt_rboxes, stride_tensor, regress_ranges)
# [B, n, L]
mask_positive = inside_mask * regress_mask * pad_gt_mask
refined_score = refined_score * mask_positive - (1. - mask_positive)
argmax_refined_score = refined_score.argmax(axis=-2)
max_refined_score = refined_score.max(axis=-2)
assigned_gt_index = argmax_refined_score
# assigned target
batch_ind = paddle.arange(
end=batch_size, dtype=gt_labels.dtype).unsqueeze(-1)
assigned_gt_index = assigned_gt_index + batch_ind * num_max_boxes
assigned_labels = paddle.gather(
gt_labels.flatten(), assigned_gt_index.flatten(), axis=0)
assigned_labels = assigned_labels.reshape([batch_size, num_anchors])
assigned_labels = paddle.where(
max_refined_score > 0, assigned_labels,
paddle.full_like(assigned_labels, bg_index))
assigned_rboxes = paddle.gather(
gt_rboxes.reshape([-1, 5]), assigned_gt_index.flatten(), axis=0)
assigned_rboxes = assigned_rboxes.reshape([batch_size, num_anchors, 5])
assigned_scores = F.one_hot(assigned_labels, self.num_classes + 1)
ind = list(range(self.num_classes + 1))
ind.remove(bg_index)
assigned_scores = paddle.index_select(
assigned_scores, paddle.to_tensor(ind), axis=-1)
if self.score_type == 'gaussian':
selected_scores = paddle_gather(
score, 1, argmax_refined_score.unsqueeze(-2)).squeeze(-2)
assigned_scores = assigned_scores * selected_scores.unsqueeze(-1)
elif self.score_type == 'iou':
assert pred_rboxes is not None, 'If score type is iou, pred_rboxes should not be None'
from ext_op import matched_rbox_iou
b, l = pred_rboxes.shape[:2]
iou_score = matched_rbox_iou(
pred_rboxes.reshape((-1, 5)), assigned_rboxes.reshape(
(-1, 5))).reshape((b, l, 1))
assigned_scores = assigned_scores * iou_score
return assigned_labels, assigned_rboxes, assigned_scores | PaddleDetection/ppdet/modeling/assigners/fcosr_assigner.py/0 | {
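# --- Illustrative sketch; not part of the original assigner file ---
# For an axis-aligned rbox, the normalized gaussian score computed above reduces to
#     score = exp(-0.5 * factor * (dx**2 / (w * m) + dy**2 / (h * m))),  m = min(w, h),
# where (dx, dy) is the offset of an anchor point from the box center. A small numpy
# re-derivation for a single box/point, assuming the default factor of 12.
def _demo_gaussian_score(dx=8.0, dy=4.0, w=64.0, h=32.0, factor=12.0):
    m = min(w, h)
    delta_x = dx ** 2 / (w * m)
    delta_y = dy ** 2 / (h * m)
    return np.exp(-0.5 * factor * (delta_x + delta_y))  # 1.0 at the box center, decaying outward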
"file_path": "PaddleDetection/ppdet/modeling/assigners/fcosr_assigner.py",
"repo_id": "PaddleDetection",
"token_count": 4743
} | 71 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register, serializable
from ppdet.modeling.ops import batch_norm, mish
from ..shape_spec import ShapeSpec
__all__ = ['DarkNet', 'ConvBNLayer']
class ConvBNLayer(nn.Layer):
def __init__(self,
ch_in,
ch_out,
filter_size=3,
stride=1,
groups=1,
padding=0,
norm_type='bn',
norm_decay=0.,
act="leaky",
freeze_norm=False,
data_format='NCHW',
name=''):
"""
conv + bn + activation layer
Args:
ch_in (int): input channel
ch_out (int): output channel
filter_size (int): filter size, default 3
stride (int): stride, default 1
groups (int): number of groups of conv layer, default 1
padding (int): padding size, default 0
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
act (str): activation function type, default 'leaky', which means leaky_relu
freeze_norm (bool): whether to freeze norm, default False
data_format (str): data format, NCHW or NHWC
"""
super(ConvBNLayer, self).__init__()
self.conv = nn.Conv2D(
in_channels=ch_in,
out_channels=ch_out,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=groups,
data_format=data_format,
bias_attr=False)
self.batch_norm = batch_norm(
ch_out,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.act = act
def forward(self, inputs):
out = self.conv(inputs)
out = self.batch_norm(out)
if self.act == 'leaky':
out = F.leaky_relu(out, 0.1)
else:
out = getattr(F, self.act)(out)
return out
class DownSample(nn.Layer):
def __init__(self,
ch_in,
ch_out,
filter_size=3,
stride=2,
padding=1,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
data_format='NCHW'):
"""
downsample layer
Args:
ch_in (int): input channel
ch_out (int): output channel
filter_size (int): filter size, default 3
stride (int): stride, default 2
padding (int): padding size, default 1
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
freeze_norm (bool): whether to freeze norm, default False
data_format (str): data format, NCHW or NHWC
"""
super(DownSample, self).__init__()
self.conv_bn_layer = ConvBNLayer(
ch_in=ch_in,
ch_out=ch_out,
filter_size=filter_size,
stride=stride,
padding=padding,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.ch_out = ch_out
def forward(self, inputs):
out = self.conv_bn_layer(inputs)
return out
class BasicBlock(nn.Layer):
def __init__(self,
ch_in,
ch_out,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
data_format='NCHW'):
"""
BasicBlock layer of DarkNet
Args:
ch_in (int): input channel
ch_out (int): output channel
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
freeze_norm (bool): whether to freeze norm, default False
data_format (str): data format, NCHW or NHWC
"""
super(BasicBlock, self).__init__()
assert ch_in == ch_out and (ch_in % 2) == 0, \
f"ch_in and ch_out should be the same even int, but the input \'ch_in is {ch_in}, \'ch_out is {ch_out}"
# example:
# --------------{conv1} --> {conv2}
# channel route: 10-->5 --> 5-->10
self.conv1 = ConvBNLayer(
ch_in=ch_in,
ch_out=int(ch_out / 2),
filter_size=1,
stride=1,
padding=0,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.conv2 = ConvBNLayer(
ch_in=int(ch_out / 2),
ch_out=ch_out,
filter_size=3,
stride=1,
padding=1,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
def forward(self, inputs):
conv1 = self.conv1(inputs)
conv2 = self.conv2(conv1)
out = paddle.add(x=inputs, y=conv2)
return out
class Blocks(nn.Layer):
def __init__(self,
ch_in,
ch_out,
count,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
name=None,
data_format='NCHW'):
"""
Blocks layer, which consist of some BaickBlock layers
Args:
ch_in (int): input channel
ch_out (int): output channel
count (int): number of BasicBlock layer
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
freeze_norm (bool): whether to freeze norm, default False
name (str): layer name
data_format (str): data format, NCHW or NHWC
"""
super(Blocks, self).__init__()
self.basicblock0 = BasicBlock(
ch_in,
ch_out,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.res_out_list = []
for i in range(1, count):
block_name = '{}.{}'.format(name, i)
res_out = self.add_sublayer(
block_name,
BasicBlock(
ch_out,
ch_out,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format))
self.res_out_list.append(res_out)
self.ch_out = ch_out
def forward(self, inputs):
y = self.basicblock0(inputs)
for basic_block_i in self.res_out_list:
y = basic_block_i(y)
return y
DarkNet_cfg = {53: ([1, 2, 8, 8, 4])}
@register
@serializable
class DarkNet(nn.Layer):
__shared__ = ['norm_type', 'data_format']
def __init__(self,
depth=53,
freeze_at=-1,
return_idx=[2, 3, 4],
num_stages=5,
norm_type='bn',
norm_decay=0.,
freeze_norm=False,
data_format='NCHW'):
"""
Darknet, see https://pjreddie.com/darknet/yolo/
Args:
depth (int): depth of network
freeze_at (int): freeze the backbone at which stage
            num_stages (int): total number of stages, default 5
return_idx (list): index of stages whose feature maps are returned
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
data_format (str): data format, NCHW or NHWC
"""
super(DarkNet, self).__init__()
self.depth = depth
self.freeze_at = freeze_at
self.return_idx = return_idx
self.num_stages = num_stages
self.stages = DarkNet_cfg[self.depth][0:num_stages]
self.conv0 = ConvBNLayer(
ch_in=3,
ch_out=32,
filter_size=3,
stride=1,
padding=1,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self.downsample0 = DownSample(
ch_in=32,
ch_out=32 * 2,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format)
self._out_channels = []
self.darknet_conv_block_list = []
self.downsample_list = []
ch_in = [64, 128, 256, 512, 1024]
for i, stage in enumerate(self.stages):
name = 'stage.{}'.format(i)
conv_block = self.add_sublayer(
name,
Blocks(
int(ch_in[i]),
int(ch_in[i]),
stage,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format,
name=name))
self.darknet_conv_block_list.append(conv_block)
if i in return_idx:
self._out_channels.append(int(ch_in[i]))
for i in range(num_stages - 1):
down_name = 'stage.{}.downsample'.format(i)
downsample = self.add_sublayer(
down_name,
DownSample(
ch_in=int(ch_in[i]),
ch_out=int(ch_in[i + 1]),
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
data_format=data_format))
self.downsample_list.append(downsample)
def forward(self, inputs):
x = inputs['image']
out = self.conv0(x)
out = self.downsample0(out)
blocks = []
for i, conv_block_i in enumerate(self.darknet_conv_block_list):
out = conv_block_i(out)
if i == self.freeze_at:
out.stop_gradient = True
if i in self.return_idx:
blocks.append(out)
if i < self.num_stages - 1:
out = self.downsample_list[i](out)
return blocks
@property
def out_shape(self):
return [ShapeSpec(channels=c) for c in self._out_channels]
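# --- Illustrative usage sketch; not part of the original darknet.py ---
# With the default return_idx=[2, 3, 4], DarkNet-53 returns three feature maps with
# 256/512/1024 channels at strides 8/16/32, which is what the YOLOv3 neck consumes.
# A minimal forward pass on random data (input size invented for the demo).
def _demo_darknet53_forward():
    model = DarkNet(depth=53, return_idx=[2, 3, 4])
    inputs = {'image': paddle.rand([1, 3, 416, 416])}
    feats = model(inputs)
    # expected shapes: [1, 256, 52, 52], [1, 512, 26, 26], [1, 1024, 13, 13]
    return [f.shape for f in feats]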
| PaddleDetection/ppdet/modeling/backbones/darknet.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/backbones/darknet.py",
"repo_id": "PaddleDetection",
"token_count": 6061
} | 72 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.nn as nn
from ppdet.core.workspace import register, serializable
from .resnet import ResNet, Blocks, BasicBlock, BottleNeck
from ..shape_spec import ShapeSpec
from .name_adapter import NameAdapter
__all__ = ['SENet', 'SERes5Head']
@register
@serializable
class SENet(ResNet):
__shared__ = ['norm_type']
def __init__(self,
depth=50,
variant='b',
lr_mult_list=[1.0, 1.0, 1.0, 1.0],
groups=1,
base_width=64,
norm_type='bn',
norm_decay=0,
freeze_norm=True,
freeze_at=0,
return_idx=[0, 1, 2, 3],
dcn_v2_stages=[-1],
std_senet=True,
num_stages=4):
"""
Squeeze-and-Excitation Networks, see https://arxiv.org/abs/1709.01507
Args:
depth (int): SENet depth, should be 50, 101, 152
variant (str): ResNet variant, supports 'a', 'b', 'c', 'd' currently
            lr_mult_list (list): learning rate ratios of the different resnet stages (2,3,4,5);
                                 a lower ratio is needed for pretrained models obtained
                                 via distillation (default [1.0, 1.0, 1.0, 1.0]).
groups (int): group convolution cardinality
base_width (int): base width of each group convolution
norm_type (str): normalization type, 'bn', 'sync_bn' or 'affine_channel'
norm_decay (float): weight decay for normalization layer weights
freeze_norm (bool): freeze normalization layers
freeze_at (int): freeze the backbone at which stage
return_idx (list): index of the stages whose feature maps are returned
dcn_v2_stages (list): index of stages who select deformable conv v2
std_senet (bool): whether use senet, default True
num_stages (int): total num of stages
"""
super(SENet, self).__init__(
depth=depth,
variant=variant,
lr_mult_list=lr_mult_list,
ch_in=128,
groups=groups,
base_width=base_width,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
freeze_at=freeze_at,
return_idx=return_idx,
dcn_v2_stages=dcn_v2_stages,
std_senet=std_senet,
num_stages=num_stages)
@register
class SERes5Head(nn.Layer):
def __init__(self,
depth=50,
variant='b',
lr_mult=1.0,
groups=1,
base_width=64,
norm_type='bn',
norm_decay=0,
dcn_v2=False,
freeze_norm=False,
std_senet=True):
"""
SERes5Head layer
Args:
depth (int): SENet depth, should be 50, 101, 152
variant (str): ResNet variant, supports 'a', 'b', 'c', 'd' currently
            lr_mult (float): learning rate ratio of SERes5Head, default 1.0.
groups (int): group convolution cardinality
base_width (int): base width of each group convolution
norm_type (str): normalization type, 'bn', 'sync_bn' or 'affine_channel'
norm_decay (float): weight decay for normalization layer weights
            dcn_v2 (bool): whether to use deformable conv v2, default False
std_senet (bool): whether use senet, default True
"""
super(SERes5Head, self).__init__()
ch_out = 512
ch_in = 256 if depth < 50 else 1024
na = NameAdapter(self)
block = BottleNeck if depth >= 50 else BasicBlock
self.res5 = Blocks(
block,
ch_in,
ch_out,
count=3,
name_adapter=na,
stage_num=5,
variant=variant,
groups=groups,
base_width=base_width,
lr=lr_mult,
norm_type=norm_type,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
dcn_v2=dcn_v2,
std_senet=std_senet)
self.ch_out = ch_out * block.expansion
@property
def out_shape(self):
return [ShapeSpec(
channels=self.ch_out,
stride=16, )]
def forward(self, roi_feat):
y = self.res5(roi_feat)
return y
| PaddleDetection/ppdet/modeling/backbones/senet.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/backbones/senet.py",
"repo_id": "PaddleDetection",
"token_count": 2573
} | 73 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from .centernet_head import ConvLayer
from ..keypoint_utils import get_affine_transform
__all__ = ['CenterTrackHead']
@register
class CenterTrackHead(nn.Layer):
"""
Args:
in_channels (int): the channel number of input to CenterNetHead.
num_classes (int): the number of classes, 1 (MOT17 dataset) by default.
head_planes (int): the channel number in all head, 256 by default.
task (str): the type of task for regression, 'tracking' by default.
loss_weight (dict): the weight of each loss.
        add_ltrb_amodal (bool): whether to add ltrb_amodal branch, True by default.
"""
__shared__ = ['num_classes']
def __init__(self,
in_channels,
num_classes=1,
head_planes=256,
task='tracking',
loss_weight={
'tracking': 1.0,
'ltrb_amodal': 0.1,
},
add_ltrb_amodal=True):
super(CenterTrackHead, self).__init__()
self.task = task
self.loss_weight = loss_weight
self.add_ltrb_amodal = add_ltrb_amodal
# tracking head
self.tracking = nn.Sequential(
ConvLayer(
in_channels, head_planes, kernel_size=3, padding=1, bias=True),
nn.ReLU(),
ConvLayer(
head_planes, 2, kernel_size=1, stride=1, padding=0, bias=True))
# ltrb_amodal head
if self.add_ltrb_amodal and 'ltrb_amodal' in self.loss_weight:
self.ltrb_amodal = nn.Sequential(
ConvLayer(
in_channels,
head_planes,
kernel_size=3,
padding=1,
bias=True),
nn.ReLU(),
ConvLayer(
head_planes,
4,
kernel_size=1,
stride=1,
padding=0,
bias=True))
# TODO: add more tasks
@classmethod
def from_config(cls, cfg, input_shape):
if isinstance(input_shape, (list, tuple)):
input_shape = input_shape[0]
return {'in_channels': input_shape.channels}
def forward(self,
feat,
inputs,
bboxes=None,
bbox_inds=None,
topk_clses=None,
topk_ys=None,
topk_xs=None):
tracking = self.tracking(feat)
head_outs = {'tracking': tracking}
if self.add_ltrb_amodal and 'ltrb_amodal' in self.loss_weight:
ltrb_amodal = self.ltrb_amodal(feat)
head_outs.update({'ltrb_amodal': ltrb_amodal})
if self.training:
losses = self.get_loss(inputs, self.loss_weight, head_outs)
return losses
else:
ret = self.generic_decode(head_outs, bboxes, bbox_inds, topk_ys,
topk_xs)
return ret
def get_loss(self, inputs, weights, head_outs):
index = inputs['index'].unsqueeze(2)
mask = inputs['index_mask'].unsqueeze(2)
batch_inds = list()
for i in range(head_outs['tracking'].shape[0]):
batch_ind = paddle.full(
shape=[1, index.shape[1], 1], fill_value=i, dtype='int64')
batch_inds.append(batch_ind)
batch_inds = paddle.concat(batch_inds, axis=0)
index = paddle.concat(x=[batch_inds, index], axis=2)
# 1.tracking head loss: L1 loss
tracking = head_outs['tracking'].transpose([0, 2, 3, 1])
tracking_target = inputs['tracking']
bs, _, _, c = tracking.shape
tracking = tracking.reshape([bs, -1, c])
pos_tracking = paddle.gather_nd(tracking, index=index)
tracking_mask = paddle.cast(
paddle.expand_as(mask, pos_tracking), dtype=pos_tracking.dtype)
pos_num = tracking_mask.sum()
tracking_mask.stop_gradient = True
tracking_target.stop_gradient = True
tracking_loss = F.l1_loss(
pos_tracking * tracking_mask,
tracking_target * tracking_mask,
reduction='sum')
tracking_loss = tracking_loss / (pos_num + 1e-4)
        # 2.ltrb_amodal head loss (optional): L1 loss
if self.add_ltrb_amodal and 'ltrb_amodal' in self.loss_weight:
ltrb_amodal = head_outs['ltrb_amodal'].transpose([0, 2, 3, 1])
ltrb_amodal_target = inputs['ltrb_amodal']
bs, _, _, c = ltrb_amodal.shape
ltrb_amodal = ltrb_amodal.reshape([bs, -1, c])
pos_ltrb_amodal = paddle.gather_nd(ltrb_amodal, index=index)
ltrb_amodal_mask = paddle.cast(
paddle.expand_as(mask, pos_ltrb_amodal),
dtype=pos_ltrb_amodal.dtype)
pos_num = ltrb_amodal_mask.sum()
ltrb_amodal_mask.stop_gradient = True
ltrb_amodal_target.stop_gradient = True
ltrb_amodal_loss = F.l1_loss(
pos_ltrb_amodal * ltrb_amodal_mask,
ltrb_amodal_target * ltrb_amodal_mask,
reduction='sum')
ltrb_amodal_loss = ltrb_amodal_loss / (pos_num + 1e-4)
losses = {'tracking_loss': tracking_loss, }
plugin_loss = weights['tracking'] * tracking_loss
if self.add_ltrb_amodal and 'ltrb_amodal' in self.loss_weight:
losses.update({'ltrb_amodal_loss': ltrb_amodal_loss})
plugin_loss += weights['ltrb_amodal'] * ltrb_amodal_loss
losses.update({'plugin_loss': plugin_loss})
return losses
def generic_decode(self, head_outs, bboxes, bbox_inds, topk_ys, topk_xs):
topk_ys = paddle.floor(topk_ys) # note: More accurate
topk_xs = paddle.floor(topk_xs)
cts = paddle.concat([topk_xs, topk_ys], 1)
ret = {'bboxes': bboxes, 'cts': cts}
regression_heads = ['tracking'] # todo: add more tasks
for head in regression_heads:
if head in head_outs:
ret[head] = _tranpose_and_gather_feat(head_outs[head],
bbox_inds)
if 'ltrb_amodal' in head_outs:
ltrb_amodal = head_outs['ltrb_amodal']
ltrb_amodal = _tranpose_and_gather_feat(ltrb_amodal, bbox_inds)
bboxes_amodal = paddle.concat(
[
topk_xs * 1.0 + ltrb_amodal[..., 0:1],
topk_ys * 1.0 + ltrb_amodal[..., 1:2],
topk_xs * 1.0 + ltrb_amodal[..., 2:3],
topk_ys * 1.0 + ltrb_amodal[..., 3:4]
],
axis=1)
ret['bboxes'] = paddle.concat([bboxes[:, 0:2], bboxes_amodal], 1)
# cls_id, score, x0, y0, x1, y1
return ret
def centertrack_post_process(self, dets, meta, out_thresh):
if not ('bboxes' in dets):
return [{}]
preds = []
c, s = meta['center'].numpy(), meta['scale'].numpy()
h, w = meta['out_height'].numpy(), meta['out_width'].numpy()
trans = get_affine_transform(
center=c[0],
input_size=s[0],
rot=0,
output_size=[w[0], h[0]],
shift=(0., 0.),
inv=True).astype(np.float32)
for i, dets_bbox in enumerate(dets['bboxes']):
if dets_bbox[1] < out_thresh:
break
item = {}
item['score'] = dets_bbox[1]
item['class'] = int(dets_bbox[0]) + 1
item['ct'] = transform_preds_with_trans(
dets['cts'][i].reshape([1, 2]), trans).reshape(2)
if 'tracking' in dets:
tracking = transform_preds_with_trans(
(dets['tracking'][i] + dets['cts'][i]).reshape([1, 2]),
trans).reshape(2)
item['tracking'] = tracking - item['ct']
if 'bboxes' in dets:
bbox = transform_preds_with_trans(
dets_bbox[2:6].reshape([2, 2]), trans).reshape(4)
item['bbox'] = bbox
preds.append(item)
return preds
def transform_preds_with_trans(coords, trans):
target_coords = np.ones((coords.shape[0], 3), np.float32)
target_coords[:, :2] = coords
target_coords = np.dot(trans, target_coords.transpose()).transpose()
return target_coords[:, :2]
def _tranpose_and_gather_feat(feat, bbox_inds):
feat = feat.transpose([0, 2, 3, 1])
feat = feat.reshape([-1, feat.shape[3]])
feat = paddle.gather(feat, bbox_inds)
return feat
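# --- Illustrative sketch; not part of the original head file ---
# _tranpose_and_gather_feat() flattens an NCHW feature map to [(N*H*W), C] rows and
# gathers the rows addressed by the flattened top-k center indices, i.e. the per-center
# regression values. A tiny hand-checked example (values invented for the demo).
def _demo_gather_center_feat():
    feat = paddle.arange(12, dtype='float32').reshape([1, 2, 2, 3])  # [N=1, C=2, H=2, W=3]
    bbox_inds = paddle.to_tensor([0, 4])  # flattened y * W + x positions: (0, 0) and (1, 1)
    gathered = _tranpose_and_gather_feat(feat, bbox_inds)
    # expected rows: [feat[0, :, 0, 0], feat[0, :, 1, 1]] -> [[0., 6.], [4., 10.]]
    return gathered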
| PaddleDetection/ppdet/modeling/heads/centertrack_head.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/heads/centertrack_head.py",
"repo_id": "PaddleDetection",
"token_count": 4912
} | 74 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/models/anchor_heads_rotated/s2anet_head.py
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import Normal, Constant
from ppdet.core.workspace import register
from ppdet.modeling.proposal_generator.target_layer import RBoxAssigner
from ppdet.modeling.proposal_generator.anchor_generator import S2ANetAnchorGenerator
from ppdet.modeling.layers import AlignConv
from ..cls_utils import _get_class_default_kwargs
import numpy as np
@register
class S2ANetHead(nn.Layer):
"""
S2Anet head
Args:
stacked_convs (int): number of stacked_convs
feat_in (int): input channels of feat
feat_out (int): output channels of feat
num_classes (int): num_classes
anchor_strides (list): stride of anchors
anchor_scales (list): scale of anchors
anchor_ratios (list): ratios of anchors
target_means (list): target_means
target_stds (list): target_stds
align_conv_type (str): align_conv_type ['Conv', 'AlignConv']
align_conv_size (int): kernel size of align_conv
use_sigmoid_cls (bool): use sigmoid_cls or not
reg_loss_weight (list): loss weight for regression
"""
__shared__ = ['num_classes']
__inject__ = ['anchor_assign', 'nms']
def __init__(self,
stacked_convs=2,
feat_in=256,
feat_out=256,
num_classes=15,
anchor_strides=[8, 16, 32, 64, 128],
anchor_scales=[4],
anchor_ratios=[1.0],
target_means=0.0,
target_stds=1.0,
align_conv_type='AlignConv',
align_conv_size=3,
use_sigmoid_cls=True,
anchor_assign=_get_class_default_kwargs(RBoxAssigner),
reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.1],
cls_loss_weight=[1.1, 1.05],
reg_loss_type='l1',
nms_pre=2000,
nms='MultiClassNMS'):
super(S2ANetHead, self).__init__()
self.stacked_convs = stacked_convs
self.feat_in = feat_in
self.feat_out = feat_out
self.anchor_list = None
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.anchor_strides = anchor_strides
self.anchor_strides = paddle.to_tensor(anchor_strides)
self.anchor_base_sizes = list(anchor_strides)
self.means = paddle.ones(shape=[5]) * target_means
self.stds = paddle.ones(shape=[5]) * target_stds
assert align_conv_type in ['AlignConv', 'Conv', 'DCN']
self.align_conv_type = align_conv_type
self.align_conv_size = align_conv_size
self.use_sigmoid_cls = use_sigmoid_cls
self.cls_out_channels = num_classes if self.use_sigmoid_cls else num_classes + 1
self.sampling = False
self.anchor_assign = anchor_assign
self.reg_loss_weight = reg_loss_weight
self.cls_loss_weight = cls_loss_weight
self.alpha = 1.0
self.beta = 1.0
self.reg_loss_type = reg_loss_type
self.nms_pre = nms_pre
self.nms = nms
self.fake_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
# anchor
self.anchor_generators = []
for anchor_base in self.anchor_base_sizes:
self.anchor_generators.append(
S2ANetAnchorGenerator(anchor_base, anchor_scales,
anchor_ratios))
self.anchor_generators = nn.LayerList(self.anchor_generators)
self.fam_cls_convs = nn.Sequential()
self.fam_reg_convs = nn.Sequential()
for i in range(self.stacked_convs):
chan_in = self.feat_in if i == 0 else self.feat_out
self.fam_cls_convs.add_sublayer(
'fam_cls_conv_{}'.format(i),
nn.Conv2D(
in_channels=chan_in,
out_channels=self.feat_out,
kernel_size=3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0))))
self.fam_cls_convs.add_sublayer('fam_cls_conv_{}_act'.format(i),
nn.ReLU())
self.fam_reg_convs.add_sublayer(
'fam_reg_conv_{}'.format(i),
nn.Conv2D(
in_channels=chan_in,
out_channels=self.feat_out,
kernel_size=3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0))))
self.fam_reg_convs.add_sublayer('fam_reg_conv_{}_act'.format(i),
nn.ReLU())
self.fam_reg = nn.Conv2D(
self.feat_out,
5,
1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0)))
prior_prob = 0.01
bias_init = float(-np.log((1 - prior_prob) / prior_prob))
self.fam_cls = nn.Conv2D(
self.feat_out,
self.cls_out_channels,
1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(bias_init)))
if self.align_conv_type == "AlignConv":
self.align_conv = AlignConv(self.feat_out, self.feat_out,
self.align_conv_size)
elif self.align_conv_type == "Conv":
self.align_conv = nn.Conv2D(
self.feat_out,
self.feat_out,
self.align_conv_size,
padding=(self.align_conv_size - 1) // 2,
bias_attr=ParamAttr(initializer=Constant(0)))
elif self.align_conv_type == "DCN":
self.align_conv_offset = nn.Conv2D(
self.feat_out,
2 * self.align_conv_size**2,
1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0)))
self.align_conv = paddle.vision.ops.DeformConv2D(
self.feat_out,
self.feat_out,
self.align_conv_size,
padding=(self.align_conv_size - 1) // 2,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=False)
self.or_conv = nn.Conv2D(
self.feat_out,
self.feat_out,
kernel_size=3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0)))
# ODM
self.odm_cls_convs = nn.Sequential()
self.odm_reg_convs = nn.Sequential()
for i in range(self.stacked_convs):
ch_in = self.feat_out
# ch_in = int(self.feat_out / 8) if i == 0 else self.feat_out
self.odm_cls_convs.add_sublayer(
'odm_cls_conv_{}'.format(i),
nn.Conv2D(
in_channels=ch_in,
out_channels=self.feat_out,
kernel_size=3,
stride=1,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0))))
self.odm_cls_convs.add_sublayer('odm_cls_conv_{}_act'.format(i),
nn.ReLU())
self.odm_reg_convs.add_sublayer(
'odm_reg_conv_{}'.format(i),
nn.Conv2D(
in_channels=self.feat_out,
out_channels=self.feat_out,
kernel_size=3,
stride=1,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0))))
self.odm_reg_convs.add_sublayer('odm_reg_conv_{}_act'.format(i),
nn.ReLU())
self.odm_cls = nn.Conv2D(
self.feat_out,
self.cls_out_channels,
3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(bias_init)))
self.odm_reg = nn.Conv2D(
self.feat_out,
5,
3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),
bias_attr=ParamAttr(initializer=Constant(0)))
def forward(self, feats, targets=None):
fam_reg_list, fam_cls_list = [], []
odm_reg_list, odm_cls_list = [], []
num_anchors_list, base_anchors_list, refine_anchors_list = [], [], []
for i, feat in enumerate(feats):
# get shape
B = feat.shape[0]
H, W = paddle.shape(feat)[2], paddle.shape(feat)[3]
NA = H * W
num_anchors_list.append(NA)
fam_cls_feat = self.fam_cls_convs(feat)
fam_cls = self.fam_cls(fam_cls_feat)
# [N, CLS, H, W] --> [N, H, W, CLS]
fam_cls = fam_cls.transpose([0, 2, 3, 1]).reshape(
[B, NA, self.cls_out_channels])
fam_cls_list.append(fam_cls)
fam_reg_feat = self.fam_reg_convs(feat)
fam_reg = self.fam_reg(fam_reg_feat)
# [N, 5, H, W] --> [N, H, W, 5]
fam_reg = fam_reg.transpose([0, 2, 3, 1]).reshape([B, NA, 5])
fam_reg_list.append(fam_reg)
# prepare anchor
init_anchors = self.anchor_generators[i]((H, W),
self.anchor_strides[i])
init_anchors = init_anchors.reshape([1, NA, 5])
base_anchors_list.append(init_anchors.squeeze(0))
if self.training:
refine_anchor = self.bbox_decode(fam_reg.detach(), init_anchors)
else:
refine_anchor = self.bbox_decode(fam_reg, init_anchors)
refine_anchors_list.append(refine_anchor)
if self.align_conv_type == 'AlignConv':
align_feat = self.align_conv(feat,
refine_anchor.clone(), (H, W),
self.anchor_strides[i])
elif self.align_conv_type == 'DCN':
align_offset = self.align_conv_offset(feat)
align_feat = self.align_conv(feat, align_offset)
elif self.align_conv_type == 'Conv':
align_feat = self.align_conv(feat)
or_feat = self.or_conv(align_feat)
odm_reg_feat = or_feat
odm_cls_feat = or_feat
odm_reg_feat = self.odm_reg_convs(odm_reg_feat)
odm_cls_feat = self.odm_cls_convs(odm_cls_feat)
odm_cls = self.odm_cls(odm_cls_feat)
# [N, CLS, H, W] --> [N, H, W, CLS]
odm_cls = odm_cls.transpose([0, 2, 3, 1]).reshape(
[B, NA, self.cls_out_channels])
odm_cls_list.append(odm_cls)
odm_reg = self.odm_reg(odm_reg_feat)
# [N, 5, H, W] --> [N, H, W, 5]
odm_reg = odm_reg.transpose([0, 2, 3, 1]).reshape([B, NA, 5])
odm_reg_list.append(odm_reg)
if self.training:
return self.get_loss([
fam_cls_list, fam_reg_list, odm_cls_list, odm_reg_list,
num_anchors_list, base_anchors_list, refine_anchors_list
], targets)
else:
odm_bboxes_list = []
for odm_reg, refine_anchor in zip(odm_reg_list,
refine_anchors_list):
odm_bboxes = self.bbox_decode(odm_reg, refine_anchor)
odm_bboxes_list.append(odm_bboxes)
return [odm_bboxes_list, odm_cls_list]
def get_bboxes(self, head_outs):
        pred_bboxes_list, pred_scores_list = head_outs
batch = paddle.shape(pred_scores_list[0])[0]
bboxes, bbox_num = [], []
for i in range(batch):
pred_scores_per_image = [t[i] for t in pred_scores_list]
            pred_bboxes_per_image = [t[i] for t in pred_bboxes_list]
bbox_per_image, bbox_num_per_image = self.get_bboxes_single(
pred_scores_per_image, pred_bboxes_per_image)
bboxes.append(bbox_per_image)
bbox_num.append(bbox_num_per_image)
bboxes = paddle.concat(bboxes)
bbox_num = paddle.concat(bbox_num)
return bboxes, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
including labels, scores and bboxes. The size of
bboxes are corresponding to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i, 0:1], scale_factor[i, 1:2]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
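    # Worked example for get_pred above (illustrative values, not from the
    # original source): with scale_factor = (2.0, 2.0), a predicted polygon
    # corner at (100, 60) in network-input coordinates is mapped back to
    # (50, 30) in the original image, and then clipped to the valid range
    # [0, origin_w - 1] x [0, origin_h - 1].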
def get_bboxes_single(self, cls_score_list, bbox_pred_list):
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred in zip(cls_score_list, bbox_pred_list):
if self.use_sigmoid_cls:
scores = F.sigmoid(cls_score)
else:
scores = F.softmax(cls_score, axis=-1)
if scores.shape[0] > self.nms_pre:
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores = paddle.max(scores, axis=1)
else:
max_scores = paddle.max(scores[:, :-1], axis=1)
topk_val, topk_inds = paddle.topk(max_scores, self.nms_pre)
bbox_pred = paddle.gather(bbox_pred, topk_inds)
scores = paddle.gather(scores, topk_inds)
mlvl_bboxes.append(bbox_pred)
mlvl_scores.append(scores)
mlvl_bboxes = paddle.concat(mlvl_bboxes)
mlvl_scores = paddle.concat(mlvl_scores)
mlvl_polys = self.rbox2poly(mlvl_bboxes).unsqueeze(0)
mlvl_scores = paddle.transpose(mlvl_scores, [1, 0]).unsqueeze(0)
bbox, bbox_num, _ = self.nms(mlvl_polys, mlvl_scores)
if bbox.shape[0] <= 0:
bbox = self.fake_bbox
bbox_num = self.fake_bbox_num
return bbox, bbox_num
def smooth_l1_loss(self, pred, label, delta=1.0 / 9.0):
"""
Args:
pred: pred score
label: label
delta: delta
Returns: loss
"""
assert pred.shape == label.shape and label.numel() > 0
assert delta > 0
diff = paddle.abs(pred - label)
loss = paddle.where(diff < delta, 0.5 * diff * diff / delta,
diff - 0.5 * delta)
return loss
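    # Worked example for smooth_l1_loss above (illustrative, with the default
    # delta = 1/9): for |pred - label| = 0.05 < delta the quadratic branch
    # gives 0.5 * 0.05**2 / (1/9), about 0.0113, while for |pred - label| = 0.5
    # the linear branch gives 0.5 - 0.5 * (1/9), about 0.4444.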
def get_fam_loss(self, fam_target, s2anet_head_out, reg_loss_type='l1'):
(labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes,
pos_inds, neg_inds) = fam_target
        (fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list,
         odm_reg_branch_list, num_anchors_list) = s2anet_head_out
fam_cls_losses = []
fam_bbox_losses = []
st_idx = 0
num_total_samples = len(pos_inds) + len(
neg_inds) if self.sampling else len(pos_inds)
num_total_samples = max(1, num_total_samples)
for idx, feat_anchor_num in enumerate(num_anchors_list):
# step1: get data
feat_labels = labels[st_idx:st_idx + feat_anchor_num]
feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num]
feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :]
feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :]
# step2: calc cls loss
feat_labels = feat_labels.reshape(-1)
feat_label_weights = feat_label_weights.reshape(-1)
fam_cls_score = fam_cls_branch_list[idx]
fam_cls_score = paddle.squeeze(fam_cls_score, axis=0)
fam_cls_score1 = fam_cls_score
feat_labels = paddle.to_tensor(feat_labels)
feat_labels_one_hot = paddle.nn.functional.one_hot(
feat_labels, self.cls_out_channels + 1)
feat_labels_one_hot = feat_labels_one_hot[:, 1:]
feat_labels_one_hot.stop_gradient = True
num_total_samples = paddle.to_tensor(
num_total_samples, dtype='float32', stop_gradient=True)
fam_cls = F.sigmoid_focal_loss(
fam_cls_score1,
feat_labels_one_hot,
normalizer=num_total_samples,
reduction='none')
feat_label_weights = feat_label_weights.reshape(
feat_label_weights.shape[0], 1)
feat_label_weights = np.repeat(
feat_label_weights, self.cls_out_channels, axis=1)
feat_label_weights = paddle.to_tensor(
feat_label_weights, stop_gradient=True)
fam_cls = fam_cls * feat_label_weights
fam_cls_total = paddle.sum(fam_cls)
fam_cls_losses.append(fam_cls_total)
# step3: regression loss
feat_bbox_targets = paddle.to_tensor(
feat_bbox_targets, dtype='float32', stop_gradient=True)
feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5])
fam_bbox_pred = fam_reg_branch_list[idx]
fam_bbox_pred = paddle.squeeze(fam_bbox_pred, axis=0)
fam_bbox_pred = paddle.reshape(fam_bbox_pred, [-1, 5])
fam_bbox = self.smooth_l1_loss(fam_bbox_pred, feat_bbox_targets)
loss_weight = paddle.to_tensor(
self.reg_loss_weight, dtype='float32', stop_gradient=True)
fam_bbox = paddle.multiply(fam_bbox, loss_weight)
feat_bbox_weights = paddle.to_tensor(
feat_bbox_weights, stop_gradient=True)
fam_bbox = fam_bbox * feat_bbox_weights
fam_bbox_total = paddle.sum(fam_bbox) / num_total_samples
fam_bbox_losses.append(fam_bbox_total)
st_idx += feat_anchor_num
fam_cls_loss = paddle.add_n(fam_cls_losses)
fam_cls_loss_weight = paddle.to_tensor(
self.cls_loss_weight[0], dtype='float32', stop_gradient=True)
fam_cls_loss = fam_cls_loss * fam_cls_loss_weight
fam_reg_loss = paddle.add_n(fam_bbox_losses)
return fam_cls_loss, fam_reg_loss
def get_odm_loss(self, odm_target, s2anet_head_out, reg_loss_type='l1'):
(labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes,
pos_inds, neg_inds) = odm_target
        (fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list,
         odm_reg_branch_list, num_anchors_list) = s2anet_head_out
odm_cls_losses = []
odm_bbox_losses = []
st_idx = 0
num_total_samples = len(pos_inds) + len(
neg_inds) if self.sampling else len(pos_inds)
num_total_samples = max(1, num_total_samples)
for idx, feat_anchor_num in enumerate(num_anchors_list):
# step1: get data
feat_labels = labels[st_idx:st_idx + feat_anchor_num]
feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num]
feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :]
feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :]
# step2: calc cls loss
feat_labels = feat_labels.reshape(-1)
feat_label_weights = feat_label_weights.reshape(-1)
odm_cls_score = odm_cls_branch_list[idx]
odm_cls_score = paddle.squeeze(odm_cls_score, axis=0)
odm_cls_score1 = odm_cls_score
feat_labels = paddle.to_tensor(feat_labels)
feat_labels_one_hot = paddle.nn.functional.one_hot(
feat_labels, self.cls_out_channels + 1)
feat_labels_one_hot = feat_labels_one_hot[:, 1:]
feat_labels_one_hot.stop_gradient = True
num_total_samples = paddle.to_tensor(
num_total_samples, dtype='float32', stop_gradient=True)
odm_cls = F.sigmoid_focal_loss(
odm_cls_score1,
feat_labels_one_hot,
normalizer=num_total_samples,
reduction='none')
feat_label_weights = feat_label_weights.reshape(
feat_label_weights.shape[0], 1)
feat_label_weights = np.repeat(
feat_label_weights, self.cls_out_channels, axis=1)
feat_label_weights = paddle.to_tensor(feat_label_weights)
feat_label_weights.stop_gradient = True
odm_cls = odm_cls * feat_label_weights
odm_cls_total = paddle.sum(odm_cls)
odm_cls_losses.append(odm_cls_total)
            # step3: regression loss
feat_bbox_targets = paddle.to_tensor(
feat_bbox_targets, dtype='float32')
feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5])
feat_bbox_targets.stop_gradient = True
odm_bbox_pred = odm_reg_branch_list[idx]
odm_bbox_pred = paddle.squeeze(odm_bbox_pred, axis=0)
odm_bbox_pred = paddle.reshape(odm_bbox_pred, [-1, 5])
odm_bbox = self.smooth_l1_loss(odm_bbox_pred, feat_bbox_targets)
loss_weight = paddle.to_tensor(
self.reg_loss_weight, dtype='float32', stop_gradient=True)
odm_bbox = paddle.multiply(odm_bbox, loss_weight)
feat_bbox_weights = paddle.to_tensor(
feat_bbox_weights, stop_gradient=True)
odm_bbox = odm_bbox * feat_bbox_weights
odm_bbox_total = paddle.sum(odm_bbox) / num_total_samples
odm_bbox_losses.append(odm_bbox_total)
st_idx += feat_anchor_num
odm_cls_loss = paddle.add_n(odm_cls_losses)
odm_cls_loss_weight = paddle.to_tensor(
self.cls_loss_weight[1], dtype='float32', stop_gradient=True)
odm_cls_loss = odm_cls_loss * odm_cls_loss_weight
odm_reg_loss = paddle.add_n(odm_bbox_losses)
return odm_cls_loss, odm_reg_loss
def get_loss(self, head_outs, inputs):
fam_cls_list, fam_reg_list, odm_cls_list, odm_reg_list, \
num_anchors_list, base_anchors_list, refine_anchors_list = head_outs
# compute loss
fam_cls_loss_lst = []
fam_reg_loss_lst = []
odm_cls_loss_lst = []
odm_reg_loss_lst = []
batch = len(inputs['gt_rbox'])
for i in range(batch):
# data_format: (xc, yc, w, h, theta)
gt_mask = inputs['pad_gt_mask'][i, :, 0]
gt_idx = paddle.nonzero(gt_mask).squeeze(-1)
gt_bboxes = paddle.gather(inputs['gt_rbox'][i], gt_idx).numpy()
gt_labels = paddle.gather(inputs['gt_class'][i], gt_idx).numpy()
is_crowd = paddle.gather(inputs['is_crowd'][i], gt_idx).numpy()
gt_labels = gt_labels + 1
anchors_per_image = np.concatenate(base_anchors_list)
fam_cls_per_image = [t[i] for t in fam_cls_list]
fam_reg_per_image = [t[i] for t in fam_reg_list]
odm_cls_per_image = [t[i] for t in odm_cls_list]
odm_reg_per_image = [t[i] for t in odm_reg_list]
im_s2anet_head_out = (fam_cls_per_image, fam_reg_per_image,
odm_cls_per_image, odm_reg_per_image,
num_anchors_list)
# FAM
im_fam_target = self.anchor_assign(anchors_per_image, gt_bboxes,
gt_labels, is_crowd)
if im_fam_target is not None:
im_fam_cls_loss, im_fam_reg_loss = self.get_fam_loss(
im_fam_target, im_s2anet_head_out, self.reg_loss_type)
fam_cls_loss_lst.append(im_fam_cls_loss)
fam_reg_loss_lst.append(im_fam_reg_loss)
# ODM
refine_anchors_per_image = [t[i] for t in refine_anchors_list]
refine_anchors_per_image = paddle.concat(
refine_anchors_per_image).numpy()
im_odm_target = self.anchor_assign(refine_anchors_per_image,
gt_bboxes, gt_labels, is_crowd)
if im_odm_target is not None:
im_odm_cls_loss, im_odm_reg_loss = self.get_odm_loss(
im_odm_target, im_s2anet_head_out, self.reg_loss_type)
odm_cls_loss_lst.append(im_odm_cls_loss)
odm_reg_loss_lst.append(im_odm_reg_loss)
fam_cls_loss = paddle.add_n(fam_cls_loss_lst) / batch
fam_reg_loss = paddle.add_n(fam_reg_loss_lst) / batch
odm_cls_loss = paddle.add_n(odm_cls_loss_lst) / batch
odm_reg_loss = paddle.add_n(odm_reg_loss_lst) / batch
loss = fam_cls_loss + fam_reg_loss + odm_cls_loss + odm_reg_loss
return {
'loss': loss,
'fam_cls_loss': fam_cls_loss,
'fam_reg_loss': fam_reg_loss,
'odm_cls_loss': odm_cls_loss,
'odm_reg_loss': odm_reg_loss
}
def bbox_decode(self, preds, anchors, wh_ratio_clip=1e-6):
"""decode bbox from deltas
Args:
preds: [B, L, 5]
anchors: [1, L, 5]
return:
bboxes: [B, L, 5]
"""
preds = paddle.add(paddle.multiply(preds, self.stds), self.means)
dx, dy, dw, dh, dangle = paddle.split(preds, 5, axis=-1)
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = paddle.clip(dw, min=-max_ratio, max=max_ratio)
dh = paddle.clip(dh, min=-max_ratio, max=max_ratio)
rroi_x, rroi_y, rroi_w, rroi_h, rroi_angle = paddle.split(
anchors, 5, axis=-1)
gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin(
rroi_angle) + rroi_x
gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos(
rroi_angle) + rroi_y
gw = rroi_w * dw.exp()
gh = rroi_h * dh.exp()
ga = np.pi * dangle + rroi_angle
ga = (ga + np.pi / 4) % np.pi - np.pi / 4
bboxes = paddle.concat([gx, gy, gw, gh, ga], axis=-1)
return bboxes
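    # Worked example for bbox_decode above (illustrative, assuming the usual
    # zero target means and unit stds): with an anchor
    # (x, y, w, h, angle) = (0, 0, 10, 5, 0) and all-zero deltas, the decoded
    # box is the anchor itself: gx = 0, gy = 0, gw = 10 * exp(0) = 10,
    # gh = 5 * exp(0) = 5 and ga = (0 + pi/4) % pi - pi/4 = 0.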
def rbox2poly(self, rboxes):
"""
rboxes: [x_ctr,y_ctr,w,h,angle]
to
polys: [x0,y0,x1,y1,x2,y2,x3,y3]
"""
N = paddle.shape(rboxes)[0]
x_ctr = rboxes[:, 0]
y_ctr = rboxes[:, 1]
width = rboxes[:, 2]
height = rboxes[:, 3]
angle = rboxes[:, 4]
tl_x, tl_y, br_x, br_y = -width * 0.5, -height * 0.5, width * 0.5, height * 0.5
normal_rects = paddle.stack(
[tl_x, br_x, br_x, tl_x, tl_y, tl_y, br_y, br_y], axis=0)
normal_rects = paddle.reshape(normal_rects, [2, 4, N])
normal_rects = paddle.transpose(normal_rects, [2, 0, 1])
sin, cos = paddle.sin(angle), paddle.cos(angle)
# M: [N,2,2]
M = paddle.stack([cos, -sin, sin, cos], axis=0)
M = paddle.reshape(M, [2, 2, N])
M = paddle.transpose(M, [2, 0, 1])
# polys: [N,8]
polys = paddle.matmul(M, normal_rects)
polys = paddle.transpose(polys, [2, 1, 0])
polys = paddle.reshape(polys, [-1, N])
polys = paddle.transpose(polys, [1, 0])
tmp = paddle.stack(
[x_ctr, y_ctr, x_ctr, y_ctr, x_ctr, y_ctr, x_ctr, y_ctr], axis=1)
polys = polys + tmp
return polys
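

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original PaddleDetection file): a
# minimal numpy re-derivation of the rbox -> poly transform implemented by
# rbox2poly above, for a single box, assuming the [x_ctr, y_ctr, w, h, angle]
# convention used in this head. Handy as a quick sanity check of the corner
# ordering [x0, y0, x1, y1, x2, y2, x3, y3].
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    x_ctr, y_ctr, w, h, angle = 50.0, 40.0, 20.0, 10.0, 0.0
    # corners in the box frame, ordered tl, tr, br, bl
    corners = np.array([[-w / 2, -h / 2], [w / 2, -h / 2],
                        [w / 2, h / 2], [-w / 2, h / 2]])
    rot = np.array([[np.cos(angle), -np.sin(angle)],
                    [np.sin(angle), np.cos(angle)]])
    poly = (corners @ rot.T + np.array([x_ctr, y_ctr])).reshape(-1)
    print(poly)  # expected: [40. 35. 60. 35. 60. 45. 40. 45.]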
| PaddleDetection/ppdet/modeling/heads/s2anet_head.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/heads/s2anet_head.py",
"repo_id": "PaddleDetection",
"token_count": 17006
} | 75 |
import paddle
def line_iou(pred, target, img_w, length=15, aligned=True):
'''
Calculate the line iou value between predictions and targets
Args:
pred: lane predictions, shape: (num_pred, 72)
target: ground truth, shape: (num_target, 72)
img_w: image width
length: extended radius
aligned: True for iou loss calculation, False for pair-wise ious in assign
'''
px1 = pred - length
px2 = pred + length
tx1 = target - length
tx2 = target + length
if aligned:
invalid_mask = target
ovr = paddle.minimum(px2, tx2) - paddle.maximum(px1, tx1)
union = paddle.maximum(px2, tx2) - paddle.minimum(px1, tx1)
else:
num_pred = pred.shape[0]
invalid_mask = target.tile([num_pred, 1, 1])
ovr = (paddle.minimum(px2[:, None, :], tx2[None, ...]) - paddle.maximum(
px1[:, None, :], tx1[None, ...]))
union = (paddle.maximum(px2[:, None, :], tx2[None, ...]) -
paddle.minimum(px1[:, None, :], tx1[None, ...]))
invalid_masks = (invalid_mask < 0) | (invalid_mask >= img_w)
ovr[invalid_masks] = 0.
union[invalid_masks] = 0.
iou = ovr.sum(axis=-1) / (union.sum(axis=-1) + 1e-9)
return iou
def liou_loss(pred, target, img_w, length=15):
return (1 - line_iou(pred, target, img_w, length)).mean()
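

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): the lane values
# below are made up and much shorter than the real 72-point lanes, purely to
# show how line_iou / liou_loss are called.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    pred = paddle.to_tensor([[10.0, 20.0, 30.0]])    # (num_pred, n_points)
    target = paddle.to_tensor([[12.0, 18.0, 33.0]])  # (num_target, n_points)
    print(line_iou(pred, target, img_w=800, length=15, aligned=True))
    print(liou_loss(pred, target, img_w=800, length=15))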
| PaddleDetection/ppdet/modeling/losses/clrnet_line_iou_loss.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/losses/clrnet_line_iou_loss.py",
"repo_id": "PaddleDetection",
"token_count": 608
} | 76 |