text (stringlengths 3–11.2M) | id (stringlengths 15–188) | metadata (dict) | __index_level_0__ (int64, 0–275)
---|---|---|---|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import datetime
import time
import shutil
import sys
import numpy as np
import argparse
import struct
import cv2
import mxnet as mx
from mxnet import ndarray as nd
feature_dim = 512
feature_ext = 1
def load_bin(path, fill=0.0):
with open(path, 'rb') as f:
bb = f.read(4 * 4)
#print(len(bb))
v = struct.unpack('4i', bb)
#print(v[0])
bb = f.read(v[0] * 4)
v = struct.unpack("%df" % (v[0]), bb)
feature = np.full((feature_dim + feature_ext, ),
fill,
dtype=np.float32)
feature[0:feature_dim] = v
#feature = np.array( v, dtype=np.float32)
#print(feature.shape)
#print(np.linalg.norm(feature))
return feature
def write_bin(path, feature):
feature = list(feature)
with open(path, 'wb') as f:
f.write(struct.pack('4i', len(feature), 1, 4, 5))
f.write(struct.pack("%df" % len(feature), *feature))
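# The two helpers above use a simple binary layout: a header of four
# int32 values in native byte order (the first one is the number of
# float32 values that follow) and then the raw float32 payload.
# A hedged round-trip sketch (the file path is illustrative only):
#
#   feat = np.random.rand(feature_dim).astype(np.float32)
#   write_bin('/tmp/example_feature.bin', feat)
#   restored = load_bin('/tmp/example_feature.bin')
#   # restored has shape (feature_dim + feature_ext,); the extra slot
#   # holds the fill value (0.0 by default).
#   assert np.allclose(restored[:feature_dim], feat)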
def main(args):
fs_noise_map = {}
for line in open(args.facescrub_noises, 'r'):
if line.startswith('#'):
continue
line = line.strip()
fname = line.split('.')[0]
p = fname.rfind('_')
fname = fname[0:p]
fs_noise_map[line] = fname
print(len(fs_noise_map))
i = 0
fname2center = {}
noises = []
for line in open(args.facescrub_lst, 'r'):
if i % 1000 == 0:
print("reading fs", i)
i += 1
image_path = line.strip()
_path = image_path.split('/')
a, b = _path[-2], _path[-1]
feature_path = os.path.join(args.feature_dir_input, 'facescrub', a,
"%s_%s.bin" % (b, args.algo))
feature_dir_out = os.path.join(args.feature_dir_out, 'facescrub', a)
if not os.path.exists(feature_dir_out):
os.makedirs(feature_dir_out)
feature_path_out = os.path.join(feature_dir_out,
"%s_%s.bin" % (b, args.algo))
#print(b)
if not b in fs_noise_map:
#shutil.copyfile(feature_path, feature_path_out)
feature = load_bin(feature_path)
write_bin(feature_path_out, feature)
if not a in fname2center:
fname2center[a] = np.zeros((feature_dim + feature_ext, ),
dtype=np.float32)
fname2center[a] += feature
else:
#print('n', b)
noises.append((a, b))
print(len(noises))
for k in noises:
a, b = k
assert a in fname2center
center = fname2center[a]
g = np.zeros((feature_dim + feature_ext, ), dtype=np.float32)
g2 = np.random.uniform(-0.001, 0.001, (feature_dim, ))
g[0:feature_dim] = g2
f = center + g
_norm = np.linalg.norm(f)
f /= _norm
feature_path_out = os.path.join(args.feature_dir_out, 'facescrub', a,
"%s_%s.bin" % (b, args.algo))
write_bin(feature_path_out, f)
mf_noise_map = {}
for line in open(args.megaface_noises, 'r'):
if line.startswith('#'):
continue
line = line.strip()
_vec = line.split("\t")
if len(_vec) > 1:
line = _vec[1]
mf_noise_map[line] = 1
print(len(mf_noise_map))
i = 0
nrof_noises = 0
for line in open(args.megaface_lst, 'r'):
if i % 1000 == 0:
print("reading mf", i)
i += 1
image_path = line.strip()
_path = image_path.split('/')
a1, a2, b = _path[-3], _path[-2], _path[-1]
feature_path = os.path.join(args.feature_dir_input, 'megaface', a1, a2,
"%s_%s.bin" % (b, args.algo))
feature_dir_out = os.path.join(args.feature_dir_out, 'megaface', a1,
a2)
if not os.path.exists(feature_dir_out):
os.makedirs(feature_dir_out)
feature_path_out = os.path.join(feature_dir_out,
"%s_%s.bin" % (b, args.algo))
bb = '/'.join([a1, a2, b])
#print(b)
if not bb in mf_noise_map:
feature = load_bin(feature_path)
write_bin(feature_path_out, feature)
#shutil.copyfile(feature_path, feature_path_out)
else:
feature = load_bin(feature_path, 100.0)
write_bin(feature_path_out, feature)
#g = np.random.uniform(-0.001, 0.001, (feature_dim,))
#print('n', bb)
#write_bin(feature_path_out, g)
nrof_noises += 1
print(nrof_noises)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--facescrub-noises',
type=str,
help='',
default='./data/facescrub_noises.txt')
parser.add_argument('--megaface-noises',
type=str,
help='',
default='./data/megaface_noises.txt')
parser.add_argument('--algo', type=str, help='', default='insightface')
parser.add_argument('--facescrub-lst',
type=str,
help='',
default='./data/facescrub_lst')
parser.add_argument('--megaface-lst',
type=str,
help='',
default='./data/megaface_lst')
parser.add_argument('--feature-dir-input',
type=str,
help='',
default='./feature_out')
parser.add_argument('--feature-dir-out',
type=str,
help='',
default='./feature_out_clean')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| insightface/recognition/_evaluation_/megaface/remove_noises.py/0 | {
"file_path": "insightface/recognition/_evaluation_/megaface/remove_noises.py",
"repo_id": "insightface",
"token_count": 3347
} | 121 |
from .ir_resnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200
def get_model(name, **kwargs):
if name == "r18":
return iresnet18(False, **kwargs)
elif name == "r34":
return iresnet34(False, **kwargs)
elif name == "r50":
return iresnet50(False, **kwargs)
elif name == "r100":
return iresnet100(False, **kwargs)
elif name == "r200":
return iresnet200(False, **kwargs)
else:
        raise ValueError("unknown backbone name: %s" % name)
| insightface/recognition/arcface_oneflow/backbones/__init__.py/0 | {
"file_path": "insightface/recognition/arcface_oneflow/backbones/__init__.py",
"repo_id": "insightface",
"token_count": 222
} | 122 |
import importlib
import os.path as osp
def get_config(config_file):
assert config_file.startswith(
"configs/"
), "config file setting must start with configs/"
temp_config_name = osp.basename(config_file)
temp_module_name = osp.splitext(temp_config_name)[0]
config = importlib.import_module("configs.base")
cfg = config.config
config = importlib.import_module("configs.%s" % temp_module_name)
job_cfg = config.config
cfg.update(job_cfg)
if cfg.output is None:
cfg.output = osp.join("work_dirs", temp_module_name)
return cfg
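# A hedged sketch of the layout get_config expects (module and field
# names below are illustrative): configs/base.py defines `config`, an
# attribute-style dict (e.g. easydict.EasyDict) with at least an
# `output` entry, and each job file such as configs/r50.py defines its
# own `config` whose entries override the base values.
#
#   # configs/base.py
#   from easydict import EasyDict as edict
#   config = edict()
#   config.output = None
#   config.embedding_size = 512
#
#   # configs/r50.py
#   from easydict import EasyDict as edict
#   config = edict(network="r50", lr=0.1)
#
#   cfg = get_config("configs/r50.py")  # cfg.output -> "work_dirs/r50"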
| insightface/recognition/arcface_oneflow/utils/utils_config.py/0 | {
"file_path": "insightface/recognition/arcface_oneflow/utils/utils_config.py",
"repo_id": "insightface",
"token_count": 234
} | 123 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import struct
import random
import multiprocessing
import numpy as np
import cv2
import json
def readkv(f):
"""readkv"""
keylendata = f.read(4)
if len(keylendata) != 4:
return None
keylen = struct.unpack('I', keylendata)[0]
if keylen > 5000:
        raise Exception('wrong key len: ' + str(keylen))
key = f.read(keylen)
valuelen = struct.unpack('I', f.read(4))[0]
value = f.read(valuelen)
return key, value
def writekv(f, k, v, flush=True):
"""writekv"""
f.write(struct.pack('I', len(k)))
f.write(k)
f.write(struct.pack('I', len(v)))
f.write(v)
if flush:
f.flush()
return
def trans_img_to_bin(img_name, output_path):
with open(img_name, "rb") as fin:
img = fin.read()
key = os.path.split(img_name)[-1]
with open(output_path, "wb") as fout:
writekv(fout, key.encode(), pickle.dumps(img, -1))
return
def read_img_from_bin(input_path):
    # the file format can hold many key-value records, but in practice only one is stored.
with open(input_path, "rb") as fin:
r = readkv(fin)
assert r is not None
_, value = r
value = pickle.loads(value)
value = np.frombuffer(value, dtype='uint8')
img = cv2.imdecode(value, 1)
return img
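# Hedged round-trip sketch (paths are illustrative): trans_img_to_bin
# writes a single length-prefixed key/value record holding the pickled
# raw JPEG bytes, and read_img_from_bin decodes it back to an image.
#
#   trans_img_to_bin("face.jpg", "/tmp/face.bin")
#   img = read_img_from_bin("/tmp/face.bin")  # HxWx3 uint8 BGR array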
| insightface/recognition/arcface_paddle/datasets/kv_helper.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/datasets/kv_helper.py",
"repo_id": "insightface",
"token_count": 755
} | 124 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import math
import os
import paddle
import paddle.nn as nn
class LargeScaleClassifier(nn.Layer):
"""
Author: {Xiang An, Yang Xiao, XuHan Zhu} in DeepGlint,
Partial FC: Training 10 Million Identities on a Single Machine
See the original paper:
https://arxiv.org/abs/2010.05222
"""
@paddle.no_grad()
def __init__(self,
rank,
world_size,
num_classes,
margin1=1.0,
margin2=0.5,
margin3=0.0,
scale=64.0,
sample_ratio=1.0,
embedding_size=512,
fp16=False,
name=None):
super(LargeScaleClassifier, self).__init__()
self.num_classes: int = num_classes
self.rank: int = rank
self.world_size: int = world_size
self.sample_ratio: float = sample_ratio
self.embedding_size: int = embedding_size
self.fp16 = fp16
self.num_local: int = (num_classes + world_size - 1) // world_size
if num_classes % world_size != 0 and rank == world_size - 1:
self.num_local = num_classes % self.num_local
self.num_sample: int = int(self.sample_ratio * self.num_local)
self.margin1 = margin1
self.margin2 = margin2
self.margin3 = margin3
self.logit_scale = scale
self._parameter_list = []
if name is None:
name = 'dist@fc@rank@%05d.w' % rank
assert '.w' in name
stddev = math.sqrt(2.0 / (self.embedding_size + self.num_local))
param_attr = paddle.ParamAttr(
name=name, initializer=paddle.nn.initializer.Normal(std=stddev))
self.index = None
self.weight = self.create_parameter(
shape=[self.embedding_size, self.num_local],
attr=param_attr,
is_bias=False,
dtype='float16' if self.fp16 else 'float32')
self.weight.is_distributed = True
if int(self.sample_ratio) < 1:
self.weight.stop_gradient = True
def step(self, optimizer):
if int(self.sample_ratio) < 1:
warnings.warn(
"Explicitly call the function paddle._C_ops.sparse_momentum is a temporary manner. "
"We will merge it to optimizer in the future, please don't follow.")
found_inf = paddle.logical_not(
paddle.all(paddle.isfinite(self._parameter_list[0].grad)))
if found_inf:
print('Found inf or nan in classifier')
else:
if self.weight.name not in optimizer._accumulators[
optimizer._velocity_acc_str]:
optimizer._add_accumulator(optimizer._velocity_acc_str,
self.weight)
velocity = optimizer._accumulators[
optimizer._velocity_acc_str][self.weight.name]
_, _ = paddle._C_ops.sparse_momentum(
self.weight,
self._parameter_list[0].grad,
velocity,
self.index,
paddle.to_tensor(
optimizer.get_lr(), dtype='float32'),
self.weight,
velocity,
'mu',
optimizer._momentum,
'use_nesterov',
optimizer._use_nesterov,
'regularization_method',
optimizer._regularization_method,
'regularization_coeff',
optimizer._regularization_coeff,
'axis',
1)
def clear_grad(self):
self._parameter_list = []
def forward(self, feature, label):
if self.world_size > 1:
feature_list = []
paddle.distributed.all_gather(feature_list, feature)
total_feature = paddle.concat(feature_list, axis=0)
label_list = []
paddle.distributed.all_gather(label_list, label)
total_label = paddle.concat(label_list, axis=0)
total_label.stop_gradient = True
else:
total_feature = feature
total_label = label
if self.sample_ratio < 1.0:
# partial fc sample process
total_label, self.index = paddle.nn.functional.class_center_sample(
total_label, self.num_local, self.num_sample)
total_label.stop_gradient = True
self.index.stop_gradient = True
self.sub_weight = paddle.gather(self.weight, self.index, axis=1)
self.sub_weight.stop_gradient = False
self._parameter_list.append(self.sub_weight)
else:
self.sub_weight = self.weight
norm_feature = paddle.fluid.layers.l2_normalize(total_feature, axis=1)
norm_weight = paddle.fluid.layers.l2_normalize(self.sub_weight, axis=0)
local_logit = paddle.matmul(norm_feature, norm_weight)
loss = paddle.nn.functional.margin_cross_entropy(
local_logit,
total_label,
margin1=self.margin1,
margin2=self.margin2,
margin3=self.margin3,
scale=self.logit_scale,
return_softmax=False,
reduction=None, )
loss = paddle.mean(loss)
return loss
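# A hedged, single-card usage sketch (shapes, names, and the optimizer
# are illustrative; real training also sets up paddle.distributed and an
# optimizer over the backbone parameters):
#
#   lsc = LargeScaleClassifier(rank=0, world_size=1, num_classes=1000,
#                              sample_ratio=0.1, embedding_size=512)
#   feat = paddle.randn([64, 512])
#   label = paddle.randint(0, 1000, [64])
#   loss = lsc(feat, label)   # margin softmax over the sampled class centers
#   loss.backward()
#   lsc.step(optimizer)       # sparse momentum update of the sampled columns
#   lsc.clear_grad()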
| insightface/recognition/arcface_paddle/dynamic/classifiers/lsc.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/dynamic/classifiers/lsc.py",
"repo_id": "insightface",
"token_count": 2995
} | 125 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import os
import sys
import numpy as np
import paddle
from visualdl import LogWriter
from utils.logging import AverageMeter, init_logging, CallBackLogging
from utils import losses
from .utils.optimization_pass import gather_optimization_pass, amp_pass
from . import classifiers
from . import backbones
class StaticModel(object):
def __init__(self,
main_program,
startup_program,
backbone_class_name,
embedding_size,
classifier_class_name=None,
num_classes=None,
sample_ratio=0.1,
lr_scheduler=None,
momentum=0.9,
weight_decay=2e-4,
dropout=0.4,
mode='train',
fp16=False,
fp16_configs=None,
margin_loss_params=None):
rank = int(os.getenv("PADDLE_TRAINER_ID", 0))
world_size = int(os.getenv("PADDLE_TRAINERS_NUM", 1))
if world_size > 1:
import paddle.distributed.fleet as fleet
self.main_program = main_program
self.startup_program = startup_program
self.backbone_class_name = backbone_class_name
self.embedding_size = embedding_size
self.classifier_class_name = classifier_class_name
self.num_classes = num_classes
self.sample_ratio = sample_ratio
self.lr_scheduler = lr_scheduler
self.momentum = momentum
self.weight_decay = weight_decay
self.mode = mode
self.fp16 = fp16
self.fp16_configs = fp16_configs
self.margin_loss_params = margin_loss_params
if self.mode == 'train':
assert self.classifier_class_name is not None
assert self.num_classes is not None
assert self.lr_scheduler is not None
assert self.margin_loss_params is not None
with paddle.static.program_guard(self.main_program,
self.startup_program):
with paddle.utils.unique_name.guard():
self.backbone = eval("backbones.{}".format(
self.backbone_class_name))(
num_features=self.embedding_size,
is_train=True,
fp16=self.fp16,
dropout=dropout)
assert 'label' in self.backbone.input_dict
assert 'feature' in self.backbone.output_dict
self.classifier = eval("classifiers.{}".format(
self.classifier_class_name))(
feature=self.backbone.output_dict['feature'],
label=self.backbone.input_dict['label'],
rank=rank,
world_size=world_size,
num_classes=self.num_classes,
margin1=self.margin_loss_params.margin1,
margin2=self.margin_loss_params.margin2,
margin3=self.margin_loss_params.margin3,
scale=self.margin_loss_params.scale,
sample_ratio=self.sample_ratio,
embedding_size=self.embedding_size)
assert 'loss' in self.classifier.output_dict
self.optimizer = paddle.optimizer.Momentum(
learning_rate=self.lr_scheduler,
momentum=self.momentum,
weight_decay=paddle.regularizer.L2Decay(
self.weight_decay))
if self.fp16:
assert self.fp16_configs is not None
self.optimizer = paddle.static.amp.decorate(
optimizer=self.optimizer,
init_loss_scaling=self.fp16_configs[
'init_loss_scaling'],
incr_every_n_steps=self.fp16_configs[
'incr_every_n_steps'],
decr_every_n_nan_or_inf=self.fp16_configs[
'decr_every_n_nan_or_inf'],
incr_ratio=self.fp16_configs['incr_ratio'],
decr_ratio=self.fp16_configs['decr_ratio'],
use_dynamic_loss_scaling=self.fp16_configs[
'use_dynamic_loss_scaling'],
use_pure_fp16=self.fp16_configs['use_pure_fp16'],
amp_lists=paddle.static.amp.
AutoMixedPrecisionLists(
custom_white_list=self.fp16_configs[
'custom_white_list'],
custom_black_list=self.fp16_configs[
'custom_black_list'], ),
use_fp16_guard=False)
if world_size > 1:
dist_optimizer = fleet.distributed_optimizer(
self.optimizer)
dist_optimizer.minimize(self.classifier.output_dict[
'loss'])
else:
self.optimizer.minimize(self.classifier.output_dict[
'loss'])
if self.fp16:
self.optimizer = self.optimizer._optimizer
if self.sample_ratio < 1.0:
gather_optimization_pass(self.main_program,
'dist@fc@rank')
if self.fp16:
amp_pass(self.main_program, 'dist@fc@rank')
elif self.mode == 'test':
with paddle.static.program_guard(self.main_program,
self.startup_program):
with paddle.utils.unique_name.guard():
self.backbone = eval("backbones.{}".format(
self.backbone_class_name))(
num_features=self.embedding_size,
is_train=False,
fp16=self.fp16,
dropout=dropout)
assert 'feature' in self.backbone.output_dict
else:
raise ValueError(
"mode is error, only support 'train' and 'test' now.")
| insightface/recognition/arcface_paddle/static/static_model.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/static/static_model.py",
"repo_id": "insightface",
"token_count": 4073
} | 126 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import logging
sys.path.insert(0, os.path.abspath('.'))
import argparse
def str2bool(v):
return str(v).lower() in ("true", "t", "1")
def tostrlist(v):
if isinstance(v, list):
return v
elif isinstance(v, str):
return [e.strip() for e in v.split(',')]
def parse_args():
parser = argparse.ArgumentParser(description='Paddle Face Exporter')
# Model setting
parser.add_argument(
'--is_static',
type=str2bool,
default='False',
help='whether to use static mode')
parser.add_argument(
'--backbone',
type=str,
default='FresResNet50',
help='backbone network')
parser.add_argument(
'--embedding_size', type=int, default=512, help='embedding size')
parser.add_argument(
'--checkpoint_dir',
type=str,
default='MS1M_v3_arcface/FresResNet50/24/',
        help='checkpoint directory')
parser.add_argument(
'--data_dir',
type=str,
default='./MS1M_v3_bin',
help='train dataset directory')
parser.add_argument(
'--val_targets',
type=tostrlist,
default=["lfw", "cfp_fp", "agedb_30"],
help='val targets, list or str split by comma')
parser.add_argument(
'--batch_size', type=int, default=128, help='test batch size')
args = parser.parse_args()
return args
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO, format="Validation: %(asctime)s - %(message)s")
args = parse_args()
if args.is_static:
import paddle
paddle.enable_static()
from static.validation import validation
else:
from dynamic.validation import validation
validation(args)
| insightface/recognition/arcface_paddle/tools/validation.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/tools/validation.py",
"repo_id": "insightface",
"token_count": 936
} | 127 |
## Test Training Speed
- Test Commands
You need to use the following two commands to test the Partial FC training performance.
The number of identities is **3 million** (synthetic data), mixed-precision training is enabled, the backbone is ResNet-50, and the batch size is 1024.
```shell
# Model Parallel
python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions
# Partial FC 0.1
python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions_pfc
```
- GPU Memory
```
# (Model Parallel) gpustat -i
[0] Tesla V100-SXM2-32GB | 64'C, 94 % | 30338 / 32510 MB
[1] Tesla V100-SXM2-32GB | 60'C, 99 % | 28876 / 32510 MB
[2] Tesla V100-SXM2-32GB | 60'C, 99 % | 28872 / 32510 MB
[3] Tesla V100-SXM2-32GB | 69'C, 99 % | 28872 / 32510 MB
[4] Tesla V100-SXM2-32GB | 66'C, 99 % | 28888 / 32510 MB
[5] Tesla V100-SXM2-32GB | 60'C, 99 % | 28932 / 32510 MB
[6] Tesla V100-SXM2-32GB | 68'C, 100 % | 28916 / 32510 MB
[7] Tesla V100-SXM2-32GB | 65'C, 99 % | 28860 / 32510 MB
# (Partial FC 0.1) gpustat -i
[0] Tesla V100-SXM2-32GB | 60'C, 95 % | 10488 / 32510 MB
[1] Tesla V100-SXM2-32GB | 60'C, 97 % | 10344 / 32510 MB
[2] Tesla V100-SXM2-32GB | 61'C, 95 % | 10340 / 32510 MB
[3] Tesla V100-SXM2-32GB | 66'C, 95 % | 10340 / 32510 MB
[4] Tesla V100-SXM2-32GB | 65'C, 94 % | 10356 / 32510 MB
[5] Tesla V100-SXM2-32GB | 61'C, 95 % | 10400 / 32510 MB
[6] Tesla V100-SXM2-32GB | 68'C, 96 % | 10384 / 32510 MB
[7] Tesla V100-SXM2-32GB | 64'C, 95 % | 10328 / 32510 MB
```
- Training Speed
```python
# (Model Parallel) training.log
Training: Speed 2271.33 samples/sec Loss 1.1624 LearningRate 0.2000 Epoch: 0 Global Step: 100
Training: Speed 2269.94 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150
Training: Speed 2272.67 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200
Training: Speed 2266.55 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 250
Training: Speed 2272.54 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300
# (Partial FC 0.1) training.log
Training: Speed 5299.56 samples/sec Loss 1.0965 LearningRate 0.2000 Epoch: 0 Global Step: 100
Training: Speed 5296.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150
Training: Speed 5304.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200
Training: Speed 5274.43 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 250
Training: Speed 5300.10 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300
```
In this test case, Partial FC 0.1 uses only about 1/3 of the GPU memory of model parallel, and trains 2.5 times faster than model parallel.
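As a rough, hedged back-of-the-envelope estimate (our own illustration, not an official measurement) of why the sampled classifier saves memory with 3 million identities split over 8 GPUs:

```python
ids, gpus, dim, batch = 3_000_000, 8, 512, 1024
ids_per_gpu = ids // gpus                                  # 375,000 class centers per GPU
weight_gb = ids_per_gpu * dim * 4 / 1e9                    # ~0.77 GB fp32 weight, stored either way
logits_full_gb = batch * ids_per_gpu * 4 / 1e9             # ~1.54 GB of fp32 logits per step (model parallel)
logits_pfc_gb = batch * int(0.1 * ids_per_gpu) * 4 / 1e9   # ~0.15 GB with Partial FC 0.1
print(weight_gb, logits_full_gb, logits_pfc_gb)
```

Only the sampled 10% of the local class centers produce logits (plus their gradients and softmax buffers) each iteration, which is consistent with the large gap in the gpustat numbers above.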
## Speed Benchmark
1. Training speed of different parallel methods (samples/second), Tesla V100 32GB * 8. (Larger is better)
| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
| :--- | :--- | :--- | :--- |
|125000 | 4681 | 4824 | 5004 |
|250000 | 4047 | 4521 | 4976 |
|500000 | 3087 | 4013 | 4900 |
|1000000 | 2090 | 3449 | 4803 |
|1400000 | 1672 | 3043 | 4738 |
|2000000 | - | 2593 | 4626 |
|4000000 | - | 1748 | 4208 |
|5500000 | - | 1389 | 3975 |
|8000000 | - | - | 3565 |
|16000000 | - | - | 2679 |
|29000000 | - | - | 1855 |
2. GPU memory cost of different parallel methods (MB per GPU), Tesla V100 32GB * 8. (Smaller is better)
| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
| :--- | :--- | :--- | :--- |
|125000 | 7358 | 5306 | 4868 |
|250000 | 9940 | 5826 | 5004 |
|500000 | 14220 | 7114 | 5202 |
|1000000 | 23708 | 9966 | 5620 |
|1400000 | 32252 | 11178 | 6056 |
|2000000 | - | 13978 | 6472 |
|4000000 | - | 23238 | 8284 |
|5500000 | - | 32188 | 9854 |
|8000000 | - | - | 12310 |
|16000000 | - | - | 19950 |
|29000000 | - | - | 32324 |
| insightface/recognition/arcface_torch/docs/speed_benchmark.md/0 | {
"file_path": "insightface/recognition/arcface_torch/docs/speed_benchmark.md",
"repo_id": "insightface",
"token_count": 2916
} | 128 |
import os
import cv2
import numpy as np
from skimage import transform as trans
src = np.array([[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
[33.5493, 92.3655], [62.7299, 92.2041]],
dtype=np.float32)
src[:, 0] += 8.0
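# The 5x2 template above is the standard ArcFace 5-point alignment target
# (left eye, right eye, nose tip, left and right mouth corners) defined
# for a 96x112 crop; adding 8.0 to the x coordinates centers it for the
# 112x112 output used below.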
img_path = '/data/anxiang/datasets/IJB_release/IJBC/loose_crop'
img_path_align = '/data/anxiang/datasets/IJB_release/IJBC/loose_crop_align'
img_list_path = '/data/anxiang/datasets/IJB_release/IJBC/meta/ijbc_name_5pts_score.txt'
img_list = open(img_list_path)
files = img_list.readlines()
for img_index, each_line in enumerate(files):
if img_index % 500 == 0:
print('processing', img_index)
name_lmk_score = each_line.strip().split(' ')
img_name = os.path.join(img_path, name_lmk_score[0])
img = cv2.imread(img_name)
landmark = np.array([float(x) for x in name_lmk_score[1:-1]],
dtype=np.float32)
    landmark = landmark.reshape((-1, 2))  # keep 5- or 68-point input so the 68-point branch below can trigger
if landmark.shape[0] == 68:
landmark5 = np.zeros((5, 2), dtype=np.float32)
landmark5[0] = (landmark[36] + landmark[39]) / 2
landmark5[1] = (landmark[42] + landmark[45]) / 2
landmark5[2] = landmark[30]
landmark5[3] = landmark[48]
landmark5[4] = landmark[54]
else:
landmark5 = landmark
tform = trans.SimilarityTransform()
tform.estimate(landmark5, src)
M = tform.params[0:2, :]
img = cv2.warpAffine(img, M, (112, 112), borderValue=0.0)
cv2.imwrite(os.path.join(img_path_align, name_lmk_score[0]), img)
| insightface/recognition/partial_fc/mxnet/evaluation/align_ijb.py/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/evaluation/align_ijb.py",
"repo_id": "insightface",
"token_count": 729
} | 129 |
#! /bin/bash
HOROVOD_GPU_ALLREDUCE=NCCL HOROVOD_GPU_BROADCAST=NCCL pip install --no-cache-dir horovod==0.19.2
| insightface/recognition/partial_fc/mxnet/setup-utils/install-horovod.sh/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/setup-utils/install-horovod.sh",
"repo_id": "insightface",
"token_count": 53
} | 130 |
# Variational Prototype Learning for Deep Face Recognition
This is the Pytorch implementation of our paper [Variational Prototype Learning for Deep Face Recognition](https://openaccess.thecvf.com/content/CVPR2021/papers/Deng_Variational_Prototype_Learning_for_Deep_Face_Recognition_CVPR_2021_paper.pdf) which is accepted by CVPR-2021.
## How to run
Define a new configuration file such as `configs/example_ms1m.py`, and start the training process with:
```shell
bash run.sh configs/example_ms1m.py
```
## Results
Results on WebFace600K (a subset of WebFace260M); the loss is margin-based softmax.
| Backbone | Dataset | VPL? | Mask | Children | African | Caucasian | South Asian | East Asian | MR-All |
|------------|------------|------------|--------|----------|---------|-----------|-------------|------------|--------|
| R50 | WebFace600K | NO | 78.949 | 74.772 | 89.231 | 94.114 | 92.308 | 73.765 | 90.591 |
| R50 | WebFace600K | YES | 78.884 | 75.739 | 89.424 | 94.220 | 92.609 | 74.365 | 90.942 |
| insightface/recognition/vpl/README.md/0 | {
"file_path": "insightface/recognition/vpl/README.md",
"repo_id": "insightface",
"token_count": 364
} | 131 |
import torch
""" Positional encoding embedding. Code was taken from https://github.com/bmild/nerf. """
class Embedder:
def __init__(self, **kwargs):
self.kwargs = kwargs
self.create_embedding_fn()
def create_embedding_fn(self):
embed_fns = []
d = self.kwargs['input_dims']
out_dim = 0
if self.kwargs['include_input']:
embed_fns.append(lambda x: x)
out_dim += d
max_freq = self.kwargs['max_freq_log2']
N_freqs = self.kwargs['num_freqs']
if self.kwargs['log_sampling']:
freq_bands = 2. ** torch.linspace(0., max_freq, N_freqs)
else:
freq_bands = torch.linspace(2.**0., 2.**max_freq, N_freqs)
for freq in freq_bands:
for p_fn in self.kwargs['periodic_fns']:
embed_fns.append(lambda x, p_fn=p_fn,
freq=freq: p_fn(x * freq))
out_dim += d
self.embed_fns = embed_fns
self.out_dim = out_dim
def embed(self, inputs):
return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
def get_embedder(multires):
embed_kwargs = {
'include_input': True,
'input_dims': 3,
'max_freq_log2': multires-1,
'num_freqs': multires,
'log_sampling': True,
'periodic_fns': [torch.sin, torch.cos],
}
embedder_obj = Embedder(**embed_kwargs)
def embed(x, eo=embedder_obj): return eo.embed(x)
return embed, embedder_obj.out_dim
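# Usage sketch: with multires=6 the embedder maps 3-D inputs to
# 3 + 3 * 2 * 6 = 39 dimensions (identity plus sin/cos at 6 frequencies).
#
#   embed_fn, out_dim = get_embedder(6)
#   pts = torch.rand(1024, 3)
#   enc = embed_fn(pts)   # shape (1024, 39); out_dim == 39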
| insightface/reconstruction/PBIDR/code/model/embedder.py/0 | {
"file_path": "insightface/reconstruction/PBIDR/code/model/embedder.py",
"repo_id": "insightface",
"token_count": 784
} | 132 |
import tensorflow as tf
from external.landmark_detector import utils, models, data_provider
from tensorflow.python.platform import tf_logging as logging
slim = tf.contrib.slim
from external.landmark_detector.flags import FLAGS
# general framework
class DeepNetwork(object):
def __init__(self):
pass
def _build_network(self, inputs, datas):
pass
def _build_losses(self, predictions, states, images, datas):
pass
def _build_summaries(self, predictions, states, images, datas):
tf.summary.image('images', images[:, :, :, :3], max_outputs=min(FLAGS['batch_size'], 3))
def _get_data(self):
provider = data_provider.ProtobuffProvider(
filename=FLAGS['dataset_dir'],
batch_size=FLAGS['batch_size'],
rescale=FLAGS['rescale'],
augmentation=FLAGS['eval_dir']=='',
)
return provider.get()
def _build_restore_fn(self, sess):
init_fn = None
if FLAGS['pretrained_model_checkpoint_path']:
print('Loading whole model ...')
variables_to_restore = slim.get_model_variables()
init_fn = slim.assign_from_checkpoint_fn(
FLAGS['pretrained_model_checkpoint_path'],
variables_to_restore,
ignore_missing_vars=True)
return init_fn
def train(self):
g = tf.Graph()
logging.set_verbosity(10)
with g.as_default():
# Load datasets.
images, *datas = self._get_data()
images /= 255.
# Define model graph.
with tf.variable_scope('net'):
with slim.arg_scope([slim.batch_norm, slim.layers.dropout],
is_training=True):
predictions, states = self._build_network(images, datas)
# custom losses
self._build_losses(predictions, states, images, datas)
# total losses
total_loss = slim.losses.get_total_loss()
tf.summary.scalar('losses/total loss', total_loss)
# image summaries
self._build_summaries(predictions, states, images, datas)
# learning rate decay
global_step = slim.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
FLAGS['initial_learning_rate'],
global_step,
FLAGS['learning_rate_decay_step'] / FLAGS['batch_size'],
FLAGS['learning_rate_decay_factor'],
staircase=True)
tf.summary.scalar('learning rate', learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate)
with tf.Session(graph=g) as sess:
init_fn = self._build_restore_fn(sess)
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
summarize_gradients=True)
logging.set_verbosity(1)
slim.learning.train(train_op,
FLAGS['train_dir'],
save_summaries_secs=60,
init_fn=init_fn,
save_interval_secs=600)
class DNFaceMultiView(DeepNetwork):
def __init__(self, n_lms=FLAGS['n_landmarks']):
super(DNFaceMultiView, self).__init__()
self.n_lms = n_lms
def _get_data(self):
provider = data_provider.ProtobuffProvider(
filename=FLAGS['dataset_dir'],
batch_size=FLAGS['batch_size'],
rescale=FLAGS['rescale'],
augmentation=FLAGS['eval_dir']=='',
)
return provider.get()
def _build_network(self, inputs, datas=None, n_stacks=1, n_channels=FLAGS['n_landmarks'], is_training=True):
# gt_heatmap, gt_lms, mask_index, gt_mask = datas
batch_size = tf.shape(inputs)[0]
height = tf.shape(inputs)[1]
width = tf.shape(inputs)[2]
net = inputs
# net = models.StackedHourglass(net, FLAGS.n_landmarks)
# states.append(net)
# net = tf.stop_gradient(net)
# net *= gt_mask[:,None,None,:]
# net = tf.concat([inputs,net], 3)
# net = models.StackedHourglass(net, FLAGS.n_landmarks)
# states.append(net)
batch_size = tf.shape(inputs)[0]
height = tf.shape(inputs)[1]
width = tf.shape(inputs)[2]
channels = tf.shape(inputs)[3]
states = []
with slim.arg_scope([slim.batch_norm, slim.layers.dropout], is_training=is_training):
with slim.arg_scope(models.hourglass_arg_scope_tf()):
net = None
# stacked hourglass
for i in range(n_stacks):
with tf.variable_scope('stack_%02d' % i):
if net is not None:
net = tf.concat((inputs, net), 3)
else:
net = inputs
net, _ = models.hourglass(
net,
regression_channels=n_channels,
classification_channels=0,
deconv='transpose',
bottleneck='bottleneck_inception')
states.append(net)
prediction = net
return prediction, states
def _build_losses(self, predictions, states, images, datas):
gt_heatmap, gt_lms, mask_index, gt_mask = datas
weight_hm = utils.get_weight(gt_heatmap, tf.ones_like(gt_heatmap), ng_w=0.1, ps_w=1) * 500
weight_hm *= gt_mask[:,None,None,:]
l2norm = slim.losses.mean_squared_error(states[0], gt_heatmap, weights=weight_hm)
tf.summary.scalar('losses/lms_pred', l2norm)
def _build_summaries(self, predictions, states, images, datas):
super()._build_summaries(predictions, states, images, datas)
gt_heatmap, gt_lms, mask_index, gt_mask = datas
tf.summary.image('predictions/landmark-regression', tf.reduce_sum(predictions, -1)[...,None] * 255.0, max_outputs=min(FLAGS['batch_size'],3))
| insightface/reconstruction/ostec/external/landmark_detector/networks.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/landmark_detector/networks.py",
"repo_id": "insightface",
"token_count": 3213
} | 133 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Linear Separability (LS)."""
from collections import defaultdict
import numpy as np
import sklearn.svm
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
classifier_urls = [
'https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX', # celebahq-classifier-00-male.pkl
'https://drive.google.com/uc?id=1Q5c6HE__ReW2W8qYAXpao68V1ryuisGo', # celebahq-classifier-01-smiling.pkl
'https://drive.google.com/uc?id=1Q7738mgWTljPOJQrZtSMLxzShEhrvVsU', # celebahq-classifier-02-attractive.pkl
'https://drive.google.com/uc?id=1QBv2Mxe7ZLvOv1YBTLq-T4DS3HjmXV0o', # celebahq-classifier-03-wavy-hair.pkl
'https://drive.google.com/uc?id=1QIvKTrkYpUrdA45nf7pspwAqXDwWOLhV', # celebahq-classifier-04-young.pkl
'https://drive.google.com/uc?id=1QJPH5rW7MbIjFUdZT7vRYfyUjNYDl4_L', # celebahq-classifier-05-5-o-clock-shadow.pkl
'https://drive.google.com/uc?id=1QPZXSYf6cptQnApWS_T83sqFMun3rULY', # celebahq-classifier-06-arched-eyebrows.pkl
'https://drive.google.com/uc?id=1QPgoAZRqINXk_PFoQ6NwMmiJfxc5d2Pg', # celebahq-classifier-07-bags-under-eyes.pkl
'https://drive.google.com/uc?id=1QQPQgxgI6wrMWNyxFyTLSgMVZmRr1oO7', # celebahq-classifier-08-bald.pkl
'https://drive.google.com/uc?id=1QcSphAmV62UrCIqhMGgcIlZfoe8hfWaF', # celebahq-classifier-09-bangs.pkl
'https://drive.google.com/uc?id=1QdWTVwljClTFrrrcZnPuPOR4mEuz7jGh', # celebahq-classifier-10-big-lips.pkl
'https://drive.google.com/uc?id=1QgvEWEtr2mS4yj1b_Y3WKe6cLWL3LYmK', # celebahq-classifier-11-big-nose.pkl
'https://drive.google.com/uc?id=1QidfMk9FOKgmUUIziTCeo8t-kTGwcT18', # celebahq-classifier-12-black-hair.pkl
'https://drive.google.com/uc?id=1QthrJt-wY31GPtV8SbnZQZ0_UEdhasHO', # celebahq-classifier-13-blond-hair.pkl
'https://drive.google.com/uc?id=1QvCAkXxdYT4sIwCzYDnCL9Nb5TDYUxGW', # celebahq-classifier-14-blurry.pkl
'https://drive.google.com/uc?id=1QvLWuwSuWI9Ln8cpxSGHIciUsnmaw8L0', # celebahq-classifier-15-brown-hair.pkl
'https://drive.google.com/uc?id=1QxW6THPI2fqDoiFEMaV6pWWHhKI_OoA7', # celebahq-classifier-16-bushy-eyebrows.pkl
'https://drive.google.com/uc?id=1R71xKw8oTW2IHyqmRDChhTBkW9wq4N9v', # celebahq-classifier-17-chubby.pkl
'https://drive.google.com/uc?id=1RDn_fiLfEGbTc7JjazRXuAxJpr-4Pl67', # celebahq-classifier-18-double-chin.pkl
'https://drive.google.com/uc?id=1RGBuwXbaz5052bM4VFvaSJaqNvVM4_cI', # celebahq-classifier-19-eyeglasses.pkl
'https://drive.google.com/uc?id=1RIxOiWxDpUwhB-9HzDkbkLegkd7euRU9', # celebahq-classifier-20-goatee.pkl
'https://drive.google.com/uc?id=1RPaNiEnJODdr-fwXhUFdoSQLFFZC7rC-', # celebahq-classifier-21-gray-hair.pkl
'https://drive.google.com/uc?id=1RQH8lPSwOI2K_9XQCZ2Ktz7xm46o80ep', # celebahq-classifier-22-heavy-makeup.pkl
'https://drive.google.com/uc?id=1RXZM61xCzlwUZKq-X7QhxOg0D2telPow', # celebahq-classifier-23-high-cheekbones.pkl
'https://drive.google.com/uc?id=1RgASVHW8EWMyOCiRb5fsUijFu-HfxONM', # celebahq-classifier-24-mouth-slightly-open.pkl
'https://drive.google.com/uc?id=1RkC8JLqLosWMaRne3DARRgolhbtg_wnr', # celebahq-classifier-25-mustache.pkl
'https://drive.google.com/uc?id=1RqtbtFT2EuwpGTqsTYJDyXdnDsFCPtLO', # celebahq-classifier-26-narrow-eyes.pkl
'https://drive.google.com/uc?id=1Rs7hU-re8bBMeRHR-fKgMbjPh-RIbrsh', # celebahq-classifier-27-no-beard.pkl
'https://drive.google.com/uc?id=1RynDJQWdGOAGffmkPVCrLJqy_fciPF9E', # celebahq-classifier-28-oval-face.pkl
'https://drive.google.com/uc?id=1S0TZ_Hdv5cb06NDaCD8NqVfKy7MuXZsN', # celebahq-classifier-29-pale-skin.pkl
'https://drive.google.com/uc?id=1S3JPhZH2B4gVZZYCWkxoRP11q09PjCkA', # celebahq-classifier-30-pointy-nose.pkl
'https://drive.google.com/uc?id=1S3pQuUz-Jiywq_euhsfezWfGkfzLZ87W', # celebahq-classifier-31-receding-hairline.pkl
'https://drive.google.com/uc?id=1S6nyIl_SEI3M4l748xEdTV2vymB_-lrY', # celebahq-classifier-32-rosy-cheeks.pkl
'https://drive.google.com/uc?id=1S9P5WCi3GYIBPVYiPTWygrYIUSIKGxbU', # celebahq-classifier-33-sideburns.pkl
'https://drive.google.com/uc?id=1SANviG-pp08n7AFpE9wrARzozPIlbfCH', # celebahq-classifier-34-straight-hair.pkl
'https://drive.google.com/uc?id=1SArgyMl6_z7P7coAuArqUC2zbmckecEY', # celebahq-classifier-35-wearing-earrings.pkl
'https://drive.google.com/uc?id=1SC5JjS5J-J4zXFO9Vk2ZU2DT82TZUza_', # celebahq-classifier-36-wearing-hat.pkl
'https://drive.google.com/uc?id=1SDAQWz03HGiu0MSOKyn7gvrp3wdIGoj-', # celebahq-classifier-37-wearing-lipstick.pkl
'https://drive.google.com/uc?id=1SEtrVK-TQUC0XeGkBE9y7L8VXfbchyKX', # celebahq-classifier-38-wearing-necklace.pkl
'https://drive.google.com/uc?id=1SF_mJIdyGINXoV-I6IAxHB_k5dxiF6M-', # celebahq-classifier-39-wearing-necktie.pkl
]
#----------------------------------------------------------------------------
def prob_normalize(p):
p = np.asarray(p).astype(np.float32)
assert len(p.shape) == 2
return p / np.sum(p)
def mutual_information(p):
p = prob_normalize(p)
px = np.sum(p, axis=1)
py = np.sum(p, axis=0)
result = 0.0
for x in range(p.shape[0]):
p_x = px[x]
for y in range(p.shape[1]):
p_xy = p[x][y]
p_y = py[y]
if p_xy > 0.0:
result += p_xy * np.log2(p_xy / (p_x * p_y)) # get bits as output
return result
def entropy(p):
p = prob_normalize(p)
result = 0.0
for x in range(p.shape[0]):
for y in range(p.shape[1]):
p_xy = p[x][y]
if p_xy > 0.0:
result -= p_xy * np.log2(p_xy)
return result
def conditional_entropy(p):
# H(Y|X) where X corresponds to axis 0, Y to axis 1
    # i.e., how many bits of additional information are needed to determine where we are on axis 1 if we know where we are on axis 0?
p = prob_normalize(p)
y = np.sum(p, axis=0, keepdims=True) # marginalize to calculate H(Y)
return max(0.0, entropy(y) - mutual_information(p)) # can slip just below 0 due to FP inaccuracies, clean those up.
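# Quick sanity check (illustrative values): a perfectly predictive 2x2
# joint table gives 0 bits, an independent uniform table gives 1 bit.
#
#   conditional_entropy([[0.5, 0.0], [0.0, 0.5]])      # -> 0.0
#   conditional_entropy([[0.25, 0.25], [0.25, 0.25]])  # -> 1.0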
#----------------------------------------------------------------------------
class LS(metric_base.MetricBase):
def __init__(self, num_samples, num_keep, attrib_indices, minibatch_per_gpu, **kwargs):
assert num_keep <= num_samples
super().__init__(**kwargs)
self.num_samples = num_samples
self.num_keep = num_keep
self.attrib_indices = attrib_indices
self.minibatch_per_gpu = minibatch_per_gpu
def _evaluate(self, Gs, Gs_kwargs, num_gpus):
minibatch_size = num_gpus * self.minibatch_per_gpu
# Construct TensorFlow graph for each GPU.
result_expr = []
for gpu_idx in range(num_gpus):
with tf.device('/gpu:%d' % gpu_idx):
Gs_clone = Gs.clone()
# Generate images.
latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
labels = self._get_random_labels_tf(self.minibatch_per_gpu)
dlatents = Gs_clone.components.mapping.get_output_for(latents, labels, **Gs_kwargs)
images = Gs_clone.get_output_for(latents, None, **Gs_kwargs)
# Downsample to 256x256. The attribute classifiers were built for 256x256.
if images.shape[2] > 256:
factor = images.shape[2] // 256
images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
images = tf.reduce_mean(images, axis=[3, 5])
# Run classifier for each attribute.
result_dict = dict(latents=latents, dlatents=dlatents[:,-1])
for attrib_idx in self.attrib_indices:
classifier = misc.load_pkl(classifier_urls[attrib_idx])
logits = classifier.get_output_for(images, None)
predictions = tf.nn.softmax(tf.concat([logits, -logits], axis=1))
result_dict[attrib_idx] = predictions
result_expr.append(result_dict)
# Sampling loop.
results = []
for begin in range(0, self.num_samples, minibatch_size):
self._report_progress(begin, self.num_samples)
results += tflib.run(result_expr)
results = {key: np.concatenate([value[key] for value in results], axis=0) for key in results[0].keys()}
# Calculate conditional entropy for each attribute.
conditional_entropies = defaultdict(list)
for attrib_idx in self.attrib_indices:
# Prune the least confident samples.
pruned_indices = list(range(self.num_samples))
pruned_indices = sorted(pruned_indices, key=lambda i: -np.max(results[attrib_idx][i]))
pruned_indices = pruned_indices[:self.num_keep]
# Fit SVM to the remaining samples.
svm_targets = np.argmax(results[attrib_idx][pruned_indices], axis=1)
for space in ['latents', 'dlatents']:
svm_inputs = results[space][pruned_indices]
try:
svm = sklearn.svm.LinearSVC()
svm.fit(svm_inputs, svm_targets)
svm.score(svm_inputs, svm_targets)
svm_outputs = svm.predict(svm_inputs)
except:
svm_outputs = svm_targets # assume perfect prediction
# Calculate conditional entropy.
p = [[np.mean([case == (row, col) for case in zip(svm_outputs, svm_targets)]) for col in (0, 1)] for row in (0, 1)]
conditional_entropies[space].append(conditional_entropy(p))
# Calculate separability scores.
scores = {key: 2**np.sum(values) for key, values in conditional_entropies.items()}
self._report_result(scores['latents'], suffix='_z')
self._report_result(scores['dlatents'], suffix='_w')
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/metrics/linear_separability.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/metrics/linear_separability.py",
"repo_id": "insightface",
"token_count": 5029
} | 134 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Network architectures used in the StyleGAN paper."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
# Primitive ops for manipulating 4D activation tensors.
# The gradients of these are not necessarily efficient or even meaningful.
def _blur2d(x, f=[1,2,1], normalize=True, flip=False, stride=1):
assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
assert isinstance(stride, int) and stride >= 1
# Finalize filter kernel.
f = np.array(f, dtype=np.float32)
if f.ndim == 1:
f = f[:, np.newaxis] * f[np.newaxis, :]
assert f.ndim == 2
if normalize:
f /= np.sum(f)
if flip:
f = f[::-1, ::-1]
f = f[:, :, np.newaxis, np.newaxis]
f = np.tile(f, [1, 1, int(x.shape[1]), 1])
# No-op => early exit.
if f.shape == (1, 1) and f[0,0] == 1:
return x
# Convolve using depthwise_conv2d.
orig_dtype = x.dtype
x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16
f = tf.constant(f, dtype=x.dtype, name='filter')
strides = [1, 1, stride, stride]
x = tf.nn.depthwise_conv2d(x, f, strides=strides, padding='SAME', data_format='NCHW')
x = tf.cast(x, orig_dtype)
return x
def _upscale2d(x, factor=2, gain=1):
assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
assert isinstance(factor, int) and factor >= 1
# Apply gain.
if gain != 1:
x *= gain
# No-op => early exit.
if factor == 1:
return x
# Upscale using tf.tile().
s = x.shape
x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
return x
def _downscale2d(x, factor=2, gain=1):
assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
assert isinstance(factor, int) and factor >= 1
# 2x2, float32 => downscale using _blur2d().
if factor == 2 and x.dtype == tf.float32:
f = [np.sqrt(gain) / factor] * factor
return _blur2d(x, f=f, normalize=False, stride=factor)
# Apply gain.
if gain != 1:
x *= gain
# No-op => early exit.
if factor == 1:
return x
# Large factor => downscale using tf.nn.avg_pool().
# NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work.
ksize = [1, 1, factor, factor]
return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW')
#----------------------------------------------------------------------------
# High-level ops for manipulating 4D activation tensors.
# The gradients of these are meant to be as efficient as possible.
def blur2d(x, f=[1,2,1], normalize=True):
with tf.variable_scope('Blur2D'):
@tf.custom_gradient
def func(x):
y = _blur2d(x, f, normalize)
@tf.custom_gradient
def grad(dy):
dx = _blur2d(dy, f, normalize, flip=True)
return dx, lambda ddx: _blur2d(ddx, f, normalize)
return y, grad
return func(x)
def upscale2d(x, factor=2):
with tf.variable_scope('Upscale2D'):
@tf.custom_gradient
def func(x):
y = _upscale2d(x, factor)
@tf.custom_gradient
def grad(dy):
dx = _downscale2d(dy, factor, gain=factor**2)
return dx, lambda ddx: _upscale2d(ddx, factor)
return y, grad
return func(x)
def downscale2d(x, factor=2):
with tf.variable_scope('Downscale2D'):
@tf.custom_gradient
def func(x):
y = _downscale2d(x, factor)
@tf.custom_gradient
def grad(dy):
dx = _upscale2d(dy, factor, gain=1/factor**2)
return dx, lambda ddx: _downscale2d(ddx, factor)
return y, grad
return func(x)
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolutional or fully-connected layer.
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, lrmul=1):
fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
he_std = gain / np.sqrt(fan_in) # He init
# Equalized learning rate and custom learning rate multiplier.
if use_wscale:
init_std = 1.0 / lrmul
runtime_coef = he_std * lrmul
else:
init_std = he_std / lrmul
runtime_coef = lrmul
# Create variable.
init = tf.initializers.random_normal(0, init_std)
return tf.get_variable('weight', shape=shape, initializer=init) * runtime_coef
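# For example, a 3x3 conv from 512 fmaps has fan_in = 3*3*512 = 4608, so
# he_std = sqrt(2/4608) ~= 0.021; with use_wscale the variable is stored
# with std 1/lrmul and the He scaling is applied at runtime via runtime_coef.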
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense(x, fmaps, **kwargs):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], **kwargs)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolutional layer.
def conv2d(x, fmaps, kernel, **kwargs):
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Fused convolution + scaling.
# Faster and uses less memory than performing the operations separately.
def upscale2d_conv2d(x, fmaps, kernel, fused_scale='auto', **kwargs):
assert kernel >= 1 and kernel % 2 == 1
assert fused_scale in [True, False, 'auto']
if fused_scale == 'auto':
fused_scale = min(x.shape[2:]) * 2 >= 128
# Not fused => call the individual ops directly.
if not fused_scale:
return conv2d(upscale2d(x), fmaps, kernel, **kwargs)
# Fused => perform both ops simultaneously using tf.nn.conv2d_transpose().
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in]
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
w = tf.cast(w, x.dtype)
os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]
return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
def conv2d_downscale2d(x, fmaps, kernel, fused_scale='auto', **kwargs):
assert kernel >= 1 and kernel % 2 == 1
assert fused_scale in [True, False, 'auto']
if fused_scale == 'auto':
fused_scale = min(x.shape[2:]) >= 128
# Not fused => call the individual ops directly.
if not fused_scale:
return downscale2d(conv2d(x, fmaps, kernel, **kwargs))
# Fused => perform both ops simultaneously using tf.nn.conv2d().
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Apply bias to the given activation tensor.
def apply_bias(x, lrmul=1):
b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul
b = tf.cast(b, x.dtype)
if len(x.shape) == 2:
return x + b
return x + tf.reshape(b, [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Leaky ReLU activation. More efficient than tf.nn.leaky_relu() and supports FP16.
def leaky_relu(x, alpha=0.2):
with tf.variable_scope('LeakyReLU'):
alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
@tf.custom_gradient
def func(x):
y = tf.maximum(x, x * alpha)
@tf.custom_gradient
def grad(dy):
dx = tf.where(y >= 0, dy, dy * alpha)
return dx, lambda ddx: tf.where(y >= 0, ddx, ddx * alpha)
return y, grad
return func(x)
#----------------------------------------------------------------------------
# Pixelwise feature vector normalization.
def pixel_norm(x, epsilon=1e-8):
with tf.variable_scope('PixelNorm'):
epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')
return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)
#----------------------------------------------------------------------------
# Instance normalization.
def instance_norm(x, epsilon=1e-8):
assert len(x.shape) == 4 # NCHW
with tf.variable_scope('InstanceNorm'):
orig_dtype = x.dtype
x = tf.cast(x, tf.float32)
x -= tf.reduce_mean(x, axis=[2,3], keepdims=True)
epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')
x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon)
x = tf.cast(x, orig_dtype)
return x
#----------------------------------------------------------------------------
# Style modulation.
def style_mod(x, dlatent, **kwargs):
with tf.variable_scope('StyleMod'):
style = apply_bias(dense(dlatent, fmaps=x.shape[1]*2, gain=1, **kwargs))
style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2))
return x * (style[:,0] + 1) + style[:,1]
#----------------------------------------------------------------------------
# Noise input.
def apply_noise(x, noise_var=None, randomize_noise=True):
assert len(x.shape) == 4 # NCHW
with tf.variable_scope('Noise'):
if noise_var is None or randomize_noise:
noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
else:
noise = tf.cast(noise_var, x.dtype)
weight = tf.get_variable('weight', shape=[x.shape[1].value], initializer=tf.initializers.zeros())
return x + noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Minibatch standard deviation.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
with tf.variable_scope('MinibatchStddev'):
group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
s = x.shape # [NCHW] Input shape.
y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32.
y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group.
y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group.
y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group.
y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels.
y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups
y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type.
y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels.
return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
#----------------------------------------------------------------------------
# Style-based generator used in the StyleGAN paper.
# Composed of two sub-networks (G_mapping and G_synthesis) that are defined below.
def G_style(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
truncation_psi = 0.7, # Style strength multiplier for the truncation trick. None = disable.
truncation_cutoff = 8, # Number of layers for which to apply the truncation trick. None = disable.
truncation_psi_val = None, # Value for truncation_psi to use during validation.
truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation.
dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable.
style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable.
is_training = False, # Network is under training? Enables and disables specific features.
is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls.
**kwargs): # Arguments for sub-networks (G_mapping and G_synthesis).
# Validate arguments.
assert not is_training or not is_validation
assert isinstance(components, dnnlib.EasyDict)
if is_validation:
truncation_psi = truncation_psi_val
truncation_cutoff = truncation_cutoff_val
if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1):
truncation_psi = None
if is_training or (truncation_cutoff is not None and not tflib.is_tf_expression(truncation_cutoff) and truncation_cutoff <= 0):
truncation_cutoff = None
if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1):
dlatent_avg_beta = None
if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0):
style_mixing_prob = None
# Setup components.
if 'synthesis' not in components:
components.synthesis = tflib.Network('G_synthesis', func_name=G_synthesis, **kwargs)
num_layers = components.synthesis.input_shape[1]
dlatent_size = components.synthesis.input_shape[2]
if 'mapping' not in components:
components.mapping = tflib.Network('G_mapping', func_name=G_mapping, dlatent_broadcast=num_layers, **kwargs)
# Setup variables.
lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False)
dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False)
# Evaluate mapping network.
dlatents = components.mapping.get_output_for(latents_in, labels_in, **kwargs)
# Update moving average of W.
if dlatent_avg_beta is not None:
with tf.variable_scope('DlatentAvg'):
batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0)
update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta))
with tf.control_dependencies([update_op]):
dlatents = tf.identity(dlatents)
# Perform style mixing regularization.
if style_mixing_prob is not None:
with tf.name_scope('StyleMix'):
latents2 = tf.random_normal(tf.shape(latents_in))
dlatents2 = components.mapping.get_output_for(latents2, labels_in, **kwargs)
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2
mixing_cutoff = tf.cond(
tf.random_uniform([], 0.0, 1.0) < style_mixing_prob,
lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32),
lambda: cur_layers)
dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2)
# Apply truncation trick.
if truncation_psi is not None and truncation_cutoff is not None:
with tf.variable_scope('Truncation'):
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
ones = np.ones(layer_idx.shape, dtype=np.float32)
coefs = tf.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones)
dlatents = tflib.lerp(dlatent_avg, dlatents, coefs)
# Evaluate synthesis network.
with tf.control_dependencies([tf.assign(components.synthesis.find_var('lod'), lod_in)]):
images_out = components.synthesis.get_output_for(dlatents, force_clean_graph=is_template_graph, **kwargs)
return tf.identity(images_out, name='images_out')
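# --- Illustrative sketch (added for clarity; a NumPy rendition, not part of the original StyleGAN file) ---
# The truncation block above interpolates each per-layer w toward the tracked average
# dlatent_avg, but only for layers below truncation_cutoff. The helper below restates
# that arithmetic in plain NumPy; the default psi/cutoff values are assumptions.
def _truncation_trick_sketch(dlatents, dlatent_avg, psi=0.7, cutoff=8):
    import numpy as np
    num_layers = dlatents.shape[1]                          # dlatents: [minibatch, num_layers, dlatent_size]
    layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
    coefs = np.where(layer_idx < cutoff, psi, 1.0)          # psi for early layers, 1.0 for the rest
    return dlatent_avg + (dlatents - dlatent_avg) * coefs   # same as tflib.lerp(dlatent_avg, dlatents, coefs)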
#----------------------------------------------------------------------------
# Mapping network used in the StyleGAN paper.
def G_mapping(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
latent_size = 512, # Latent vector (Z) dimensionality.
label_size = 0, # Label dimensionality, 0 if no labels.
dlatent_size = 512, # Disentangled latent (W) dimensionality.
dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size].
mapping_layers = 8, # Number of mapping layers.
mapping_fmaps = 512, # Number of activations in the mapping layers.
mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers.
mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'.
use_wscale = True, # Enable equalized learning rate?
normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers?
dtype = 'float32', # Data type to use for activations and outputs.
**_kwargs): # Ignore unrecognized keyword args.
act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[mapping_nonlinearity]
# Inputs.
latents_in.set_shape([None, latent_size])
labels_in.set_shape([None, label_size])
latents_in = tf.cast(latents_in, dtype)
labels_in = tf.cast(labels_in, dtype)
x = latents_in
# Embed labels and concatenate them with latents.
if label_size:
with tf.variable_scope('LabelConcat'):
w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal())
y = tf.matmul(labels_in, tf.cast(w, dtype))
x = tf.concat([x, y], axis=1)
# Normalize latents.
if normalize_latents:
x = pixel_norm(x)
# Mapping layers.
for layer_idx in range(mapping_layers):
with tf.variable_scope('Dense%d' % layer_idx):
fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps
x = dense(x, fmaps=fmaps, gain=gain, use_wscale=use_wscale, lrmul=mapping_lrmul)
x = apply_bias(x, lrmul=mapping_lrmul)
x = act(x)
# Broadcast.
if dlatent_broadcast is not None:
with tf.variable_scope('Broadcast'):
x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1])
# Output.
assert x.dtype == tf.as_dtype(dtype)
return tf.identity(x, name='dlatents_out')
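# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Two pieces of G_mapping restated in plain NumPy: the pixel_norm applied to the input
# latents and the final broadcast of w across all synthesis layers. The eight dense
# layers in between are omitted; the epsilon and num_layers defaults are assumptions.
def _mapping_io_sketch(z, num_layers=18, eps=1e-8):
    import numpy as np
    w = z / np.sqrt(np.mean(np.square(z), axis=1, keepdims=True) + eps)  # pixel_norm over the feature axis
    return np.tile(w[:, np.newaxis], (1, num_layers, 1))                 # [N, latent] -> [N, num_layers, latent]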
#----------------------------------------------------------------------------
# Synthesis network used in the StyleGAN paper.
def G_synthesis(
dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
dlatent_size = 512, # Disentangled latent (W) dimensionality.
num_channels = 3, # Number of output color channels.
resolution = 1024, # Output resolution.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
use_styles = True, # Enable style inputs?
const_input_layer = True, # First layer is a learned constant?
use_noise = True, # Enable noise inputs?
randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'
use_wscale = True, # Enable equalized learning rate?
use_pixel_norm = False, # Enable pixelwise feature vector normalization?
use_instance_norm = True, # Enable instance normalization?
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering.
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
def blur(x): return blur2d(x, blur_filter) if blur_filter else x
if is_template_graph: force_clean_graph = True
if force_clean_graph: randomize_noise = False
if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive'
act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity]
num_layers = resolution_log2 * 2 - 2
num_styles = num_layers if use_styles else 1
images_out = None
# Primary inputs.
dlatents_in.set_shape([None, num_styles, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
# Noise inputs.
noise_inputs = []
if use_noise:
for layer_idx in range(num_layers):
res = layer_idx // 2 + 2
shape = [1, use_noise, 2**res, 2**res]
noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
# Things to do at the end of each layer.
def layer_epilogue(x, layer_idx):
if use_noise:
x = apply_noise(x, noise_inputs[layer_idx], randomize_noise=randomize_noise)
x = apply_bias(x)
x = act(x)
if use_pixel_norm:
x = pixel_norm(x)
if use_instance_norm:
x = instance_norm(x)
if use_styles:
x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale)
return x
# Early layers.
with tf.variable_scope('4x4'):
if const_input_layer:
with tf.variable_scope('Const'):
x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.ones())
x = layer_epilogue(tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1]), 0)
else:
with tf.variable_scope('Dense'):
                x = dense(dlatents_in[:, 0], fmaps=nf(1)*16, gain=gain/4, use_wscale=use_wscale) # tweak gain to match the official implementation of Progressive GAN
x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0)
with tf.variable_scope('Conv'):
x = layer_epilogue(conv2d(x, fmaps=nf(1), kernel=3, gain=gain, use_wscale=use_wscale), 1)
# Building blocks for remaining layers.
def block(res, x): # res = 3..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
with tf.variable_scope('Conv0_up'):
x = layer_epilogue(blur(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)), res*2-4)
with tf.variable_scope('Conv1'):
x = layer_epilogue(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale), res*2-3)
return x
def torgb(res, x): # res = 2..resolution_log2
lod = resolution_log2 - res
with tf.variable_scope('ToRGB_lod%d' % lod):
return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale))
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
for res in range(3, resolution_log2 + 1):
x = block(res, x)
images_out = torgb(resolution_log2, x)
# Linear structure: simple but inefficient.
if structure == 'linear':
images_out = torgb(2, x)
for res in range(3, resolution_log2 + 1):
lod = resolution_log2 - res
x = block(res, x)
img = torgb(res, x)
images_out = upscale2d(images_out)
with tf.variable_scope('Grow_lod%d' % lod):
images_out = tflib.lerp_clip(img, images_out, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(x, res, lod):
y = block(res, x)
img = lambda: upscale2d(torgb(res, y), 2**lod)
img = cset(img, (lod_in > lod), lambda: upscale2d(tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)), lod_in - lod), 2**lod))
if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
return img()
images_out = grow(x, 3, resolution_log2 - 3)
assert images_out.dtype == tf.as_dtype(dtype)
return tf.identity(images_out, name='images_out')
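# --- Illustrative sketch (added for clarity; not part of the original file) ---
# How the synthesis network sizes itself: a 2**k output resolution uses 2*k - 2
# style/noise layers, and nf(stage) halves the raw feature-map count each stage with
# fmap_max capping the low-resolution stages. The helper prints the schedule for the
# defaults declared above.
def _print_fmap_schedule(resolution=1024, fmap_base=8192, fmap_decay=1.0, fmap_max=512):
    import numpy as np
    resolution_log2 = int(np.log2(resolution))
    nf = lambda stage: min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
    print('num_layers =', resolution_log2 * 2 - 2)
    for res in range(2, resolution_log2 + 1):
        print('%4dx%-4d block uses nf(%d) = %d feature maps' % (2**res, 2**res, res - 1, nf(res - 1)))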
#----------------------------------------------------------------------------
# Discriminator used in the StyleGAN paper.
def D_basic(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 1, # Number of input color channels. Overridden based on dataset.
resolution = 32, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu',
use_wscale = True, # Enable equalized learning rate?
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer.
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering.
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
def blur(x): return blur2d(x, blur_filter) if blur_filter else x
if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive'
act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity]
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
scores_out = None
# Building blocks.
def fromrgb(x, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, gain=gain, use_wscale=use_wscale)))
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if res >= 3: # 8x8 and up
with tf.variable_scope('Conv0'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale)))
with tf.variable_scope('Conv1_down'):
x = act(apply_bias(conv2d_downscale2d(blur(x), fmaps=nf(res-2), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)))
else: # 4x4
if mbstd_group_size > 1:
x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
with tf.variable_scope('Conv'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale)))
with tf.variable_scope('Dense0'):
x = act(apply_bias(dense(x, fmaps=nf(res-2), gain=gain, use_wscale=use_wscale)))
with tf.variable_scope('Dense1'):
x = apply_bias(dense(x, fmaps=max(label_size, 1), gain=1, use_wscale=use_wscale))
return x
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
x = fromrgb(images_in, resolution_log2)
for res in range(resolution_log2, 2, -1):
x = block(x, res)
scores_out = block(x, 2)
# Linear structure: simple but inefficient.
if structure == 'linear':
img = images_in
x = fromrgb(img, resolution_log2)
for res in range(resolution_log2, 2, -1):
lod = resolution_log2 - res
x = block(x, res)
img = downscale2d(img)
y = fromrgb(img, res - 1)
with tf.variable_scope('Grow_lod%d' % lod):
x = tflib.lerp_clip(x, y, lod_in - lod)
scores_out = block(x, 2)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(res, lod):
x = lambda: fromrgb(downscale2d(images_in, 2**lod), res)
if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
x = block(x(), res); y = lambda: x
if res > 2: y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod))
return y()
scores_out = grow(2, resolution_log2 - 2)
# Label conditioning from "Which Training Methods for GANs do actually Converge?"
if label_size:
with tf.variable_scope('LabelSwitch'):
scores_out = tf.reduce_sum(scores_out * labels_in, axis=1, keepdims=True)
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
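# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The 'linear' and 'recursive' structures above fade resolutions in and out with
# tflib.lerp_clip(a, b, t) = a + (b - a) * clip(t, 0, 1), driven by lod_in. A scalar
# restatement: at t <= 0 the result is 'a' (the full-resolution path), and by t >= 1
# it has fully switched to 'b' (the downscaled path).
def _lerp_clip_sketch(a, b, t):
    return a + (b - a) * min(max(t, 0.0), 1.0)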
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/training/networks_stylegan.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/training/networks_stylegan.py",
"repo_id": "insightface",
"token_count": 14627
} | 135 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, Text, Any, Callable, List
from ._graph import Node, Graph
class ErrorHandling(object):
'''
To handle errors and addition of custom layers
'''
def __init__(self,
add_custom_layers = False, # type: bool
custom_conversion_functions = dict(), # type: Dict[Text, Any]
custom_layer_nodes = [], # type : List[Node]
):
# type: (...) -> None
self.add_custom_layers = add_custom_layers
self.custom_conversion_functions = custom_conversion_functions
self.custom_layer_nodes = custom_layer_nodes
def unsupported_op(self,
node, # type: Node
):
# type: (...) -> Callable[[Any, Node, Graph, ErrorHandling], None]
'''
        Either raise an error for an unsupported op type, or return the custom-layer conversion function.
'''
if self.add_custom_layers:
from ._operators import _convert_custom
return _convert_custom
else:
raise TypeError(
"ONNX node of type {} is not supported.\n".format(node.op_type,)
)
def unsupported_op_configuration(self,
node, # type: Node
err_message, # type: Text
):
raise TypeError(
"Error while converting op of type: {}. Error message: {}\n".format(node.op_type, err_message, )
)
def missing_initializer(self,
node, # type: Node
err_message, # type: Text
):
# type: (...) -> None
'''
Missing initializer error
'''
raise ValueError(
"Missing initializer error in op of type {}, with input name = {}, "
"output name = {}. Error message: {}\n".
format(node.op_type, node.inputs[0], node.outputs[0], err_message)
)
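# --- Illustrative usage sketch (added for clarity; not part of the original converter) ---
# With add_custom_layers disabled, unsupported_op raises immediately; with it enabled,
# the custom-layer converter is returned instead. FakeNode is a stand-in for the real
# Node class from ._graph, used here only so the sketch is self-contained.
def _error_handling_usage_sketch():
    class FakeNode(object):
        op_type = 'SomeUnsupportedOp'
    err = ErrorHandling(add_custom_layers=False)
    try:
        err.unsupported_op(FakeNode())
    except TypeError as exc:
        print(exc)  # ONNX node of type SomeUnsupportedOp is not supported.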
| insightface/tools/onnx2caffe/onnx2caffe/_error_utils.py/0 | {
"file_path": "insightface/tools/onnx2caffe/onnx2caffe/_error_utils.py",
"repo_id": "insightface",
"token_count": 934
} | 136 |
package com.example.nativedemo;
import com.baomidou.mybatisplus.core.handlers.MetaObjectHandler;
import org.apache.ibatis.reflection.MetaObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
/**
* @author nieqiurong
*/
@Component
public class MyMetaObjectHandler implements MetaObjectHandler {
private static final Logger LOGGER = LoggerFactory.getLogger(MyMetaObjectHandler.class);
@Override
public void insertFill(MetaObject metaObject) {
LOGGER.info("进入insertFill填充了");
setFieldValByName("createUser","user",metaObject);
}
@Override
public void updateFill(MetaObject metaObject) {
LOGGER.info("进入updateFill填充了");
setFieldValByName("updateUser","user",metaObject);
}
}
| mybatis-native-demo/src/main/java/com/example/nativedemo/MyMetaObjectHandler.java/0 | {
"file_path": "mybatis-native-demo/src/main/java/com/example/nativedemo/MyMetaObjectHandler.java",
"repo_id": "mybatis-native-demo",
"token_count": 296
} | 137 |
[
"reflect-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/ch.qos.logback/logback-classic/1.2.11/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/ch.qos.logback/logback-classic/1.2.11/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 138 |
[
{
"latest": true,
"metadata-version": "19.2",
"module": "com.graphql-java:graphql-java",
"tested-versions": [
"19.2"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.graphql-java/graphql-java/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.graphql-java/graphql-java/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 79
} | 139 |
[
{
"latest": true,
"metadata-version": "8.0.31",
"module": "com.mysql:mysql-connector-j",
"tested-versions": [
"8.0.31"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.mysql/mysql-connector-j/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.mysql/mysql-connector-j/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 84
} | 140 |
[
{
"latest": true,
"override": true,
"metadata-version": "1.51.0",
"module": "io.grpc:grpc-netty",
"tested-versions": [
"1.51.0"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.grpc/grpc-netty/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.grpc/grpc-netty/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 91
} | 141 |
[
{
"latest": true,
"override": true,
"metadata-version": "4.1.80.Final",
"module": "io.netty:netty-buffer",
"tested-versions": [
"4.1.80.Final"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.netty/netty-buffer/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.netty/netty-buffer/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 95
} | 142 |
[
{
"latest": false,
"override": true,
"metadata-version": "1.19.0",
"module": "io.opentelemetry:opentelemetry-exporter-jaeger",
"tested-versions": [
"1.19.0"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-jaeger/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-jaeger/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 100
} | 143 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.undertow/undertow-core/2.2.19.Final/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.undertow/undertow-core/2.2.19.Final/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 144 |
[
"reflect-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.commons/commons-compress/1.23.0/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.commons/commons-compress/1.23.0/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 145 |
{
"bundles": [
{
"name": "jakarta.servlet.LocalStrings",
"locales": [
"und"
]
},
{
"name": "jakarta.servlet.http.LocalStrings",
"locales": [
"und"
]
}
],
"resources": {
"includes": [
{
"condition": {
"typeReachable": "org.eclipse.jetty.server.handler.ResourceHandler"
},
"pattern": "\\Qjetty-dir.css\\E"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.http.MimeTypes"
},
"pattern": "\\Qorg/eclipse/jetty/http/encoding.properties\\E"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.http.MimeTypes"
},
"pattern": "\\Qorg/eclipse/jetty/http/mime.properties\\E"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.Jetty"
},
"pattern": "\\Qorg/eclipse/jetty/version/build.properties\\E"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.resource.Resource"
},
"pattern": "\\Qorg/eclipse/jetty/webapp/webdefault.xml\\E"
}
]
}
}
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jetty/jetty-server/11.0.12/resource-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jetty/jetty-server/11.0.12/resource-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 631
} | 146 |
[
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.disk.OffHeapDiskStore"
},
"interfaces": [
"org.ehcache.shadow.org.terracotta.offheapstore.storage.portability.Portability",
"org.ehcache.shadow.org.terracotta.offheapstore.disk.persistent.PersistentPortability"
]
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.disk.OffHeapDiskStore"
},
"interfaces": [
"org.ehcache.shadow.org.terracotta.offheapstore.storage.portability.WriteBackPortability",
"org.ehcache.shadow.org.terracotta.offheapstore.disk.persistent.PersistentPortability"
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.annotations.AnnotationProxyFactory"
},
"interfaces": [
"org.ehcache.sizeof.annotations.IgnoreSizeOf"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/proxy-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/proxy-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 354
} | 147 |
[
{
"latest": true,
"metadata-version": "2.3.31",
"module": "org.freemarker:freemarker",
"tested-versions": [
"2.3.31"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.freemarker/freemarker/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.freemarker/freemarker/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 81
} | 148 |
[
"proxy-config.json",
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate.orm/hibernate-envers/6.1.1.Final/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate.orm/hibernate-envers/6.1.1.Final/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 31
} | 149 |
{
"resources":{
"includes":[
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"pattern":"\\QMETA-INF/persistence.xml\\E"
},
{
"condition":{"typeReachable":"org.hibernate.internal.util.ConfigHelper"},
"pattern":"\\Qhibernate.properties\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.jaxb.internal.stax.LocalXmlResourceResolver$DtdMapping"},
"pattern":"\\Qorg/hibernate/hibernate-configuration-3.0.dtd\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.jaxb.internal.stax.LocalXmlResourceResolver$DtdMapping"},
"pattern":"\\Qorg/hibernate/hibernate-mapping-3.0.dtd\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.jaxb.internal.stax.LocalXmlResourceResolver$NamespaceSchemaMapping"},
"pattern":"\\Qorg/hibernate/hibernate-mapping-4.0.xsd\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.jaxb.internal.stax.LocalXmlResourceResolver$NamespaceSchemaMapping"},
"pattern":"\\Qorg/hibernate/jpa/orm_2_0.xsd\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.jaxb.internal.stax.LocalXmlResourceResolver$NamespaceSchemaMapping"},
"pattern":"\\Qorg/hibernate/jpa/orm_2_1.xsd\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.jaxb.internal.stax.LocalXmlResourceResolver$NamespaceSchemaMapping"},
"pattern":"\\Qorg/hibernate/jpa/orm_2_2.xsd\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.jaxb.internal.stax.LocalXmlResourceResolver$NamespaceSchemaMapping"},
"pattern":"\\Qorg/hibernate/jpa/orm_3_0.xsd\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.xsd.LocalXsdResolver"},
"pattern":"\\Qorg/hibernate/jpa/persistence_2_0.xsd\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.jaxb.internal.stax.LocalXmlResourceResolver$NamespaceSchemaMapping"},
"pattern":"\\Qorg/hibernate/xsd/cfg/legacy-configuration-4.0.xsd\\E"
},
{
"condition":{"typeReachable":"org.hibernate.boot.jaxb.internal.stax.LocalXmlResourceResolver$NamespaceSchemaMapping"},
"pattern":"\\Qorg/hibernate/xsd/mapping/legacy-mapping-4.0.xsd\\E"
}
]}
}
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate/hibernate-core/5.6.14.Final/resource-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate/hibernate-core/5.6.14.Final/resource-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 1466
} | 150 |
[
{
"condition": {
"typeReachable": "org.jline.terminal.impl.jansi.JansiNativePty"
},
"fields": [
{
"name": "HAVE_ISATTY"
},
{
"name": "HAVE_TTYNAME"
},
{
"name": "TCSADRAIN"
},
{
"name": "TCSAFLUSH"
},
{
"name": "TCSANOW"
},
{
"name": "TIOCGETD"
},
{
"name": "TIOCGWINSZ"
},
{
"name": "TIOCSETD"
},
{
"name": "TIOCSWINSZ"
}
],
"name": "org.fusesource.jansi.internal.CLibrary"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.CLibrary$WinSize"
},
"name": "org.fusesource.jansi.internal.CLibrary$WinSize"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.CLibrary$Termios"
},
"name": "org.fusesource.jansi.internal.CLibrary$Termios"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32"
},
"name": "org.fusesource.jansi.internal.Kernel32"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$SMALL_RECT"
},
"name": "org.fusesource.jansi.internal.Kernel32$SMALL_RECT"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$COORD"
},
"name": "org.fusesource.jansi.internal.Kernel32$COORD"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$CONSOLE_SCREEN_BUFFER_INFO"
},
"name": "org.fusesource.jansi.internal.Kernel32$CONSOLE_SCREEN_BUFFER_INFO"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$CHAR_INFO"
},
"name": "org.fusesource.jansi.internal.Kernel32$CHAR_INFO"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$KEY_EVENT_RECORD"
},
"name": "org.fusesource.jansi.internal.Kernel32$KEY_EVENT_RECORD"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$MOUSE_EVENT_RECORD"
},
"name": "org.fusesource.jansi.internal.Kernel32$MOUSE_EVENT_RECORD"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$WINDOW_BUFFER_SIZE_RECORD"
},
"name": "org.fusesource.jansi.internal.Kernel32$WINDOW_BUFFER_SIZE_RECORD"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$FOCUS_EVENT_RECORD"
},
"name": "org.fusesource.jansi.internal.Kernel32$FOCUS_EVENT_RECORD"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$MENU_EVENT_RECORD"
},
"name": "org.fusesource.jansi.internal.Kernel32$MENU_EVENT_RECORD"
},
{
"allDeclaredFields": true,
"condition": {
"typeReachable": "org.fusesource.jansi.internal.Kernel32$INPUT_RECORD"
},
"name": "org.fusesource.jansi.internal.Kernel32$INPUT_RECORD"
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jline/jline/3.21.0/jni-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jline/jline/3.21.0/jni-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 1634
} | 151 |
[
{
"metadata-version": "4.17.0",
"module": "org.liquibase:liquibase-core",
"tested-versions": [
"4.17.0"
]
},
{
"latest": true,
"metadata-version": "4.20.0",
"module": "org.liquibase:liquibase-core",
"tested-versions": [
"4.20.0"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.liquibase/liquibase-core/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.liquibase/liquibase-core/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 157
} | 152 |
[
{
"latest": true,
"metadata-version": "3.1.0-og",
"module": "org.opengauss:opengauss-jdbc",
"tested-versions": [
"3.1.0-og"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.opengauss/opengauss-jdbc/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.opengauss/opengauss-jdbc/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 91
} | 153 |
[
{
"latest": true,
"metadata-version": "3.1.0.M1",
"module": "org.thymeleaf.extras:thymeleaf-extras-springsecurity6",
"tested-versions": [
"3.1.0.M1"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf.extras/thymeleaf-extras-springsecurity6/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf.extras/thymeleaf-extras-springsecurity6/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 97
} | 154 |
package com.example.nativedemo;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link MyBatisNativeConfiguration}.
*/
public class MyBatisNativeConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'myBatisNativeConfiguration'.
*/
public static BeanDefinition getMyBatisNativeConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(MyBatisNativeConfiguration.class);
beanDefinition.setInstanceSupplier(MyBatisNativeConfiguration::new);
return beanDefinition;
}
/**
* Get the bean definition for 'myBatisMapperFactoryBeanPostProcessor'.
*/
public static BeanDefinition getMyBatisMapperFactoryBeanPostProcessorBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(MyBatisNativeConfiguration.class);
beanDefinition.setTargetType(MyBatisNativeConfiguration.MyBatisMapperFactoryBeanPostProcessor.class);
beanDefinition.setInstanceSupplier(BeanInstanceSupplier.<MyBatisNativeConfiguration.MyBatisMapperFactoryBeanPostProcessor>forFactoryMethod(MyBatisNativeConfiguration.class, "myBatisMapperFactoryBeanPostProcessor").withGenerator((registeredBean) -> MyBatisNativeConfiguration.myBatisMapperFactoryBeanPostProcessor()));
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/com/example/nativedemo/MyBatisNativeConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/com/example/nativedemo/MyBatisNativeConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 408
} | 155 |
package org.springframework.boot.autoconfigure;
import java.lang.String;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link AutoConfigurationPackages}.
*/
public class AutoConfigurationPackages__BeanDefinitions {
/**
* Bean definitions for {@link AutoConfigurationPackages.BasePackages}.
*/
public static class BasePackages {
/**
* Get the bean instance supplier for 'org.springframework.boot.autoconfigure.AutoConfigurationPackages'.
*/
private static BeanInstanceSupplier<AutoConfigurationPackages.BasePackages> getAutoConfigurationPackagesInstanceSupplier(
) {
return BeanInstanceSupplier.<AutoConfigurationPackages.BasePackages>forConstructor(String[].class)
.withGenerator((registeredBean, args) -> new AutoConfigurationPackages.BasePackages(args.get(0)));
}
/**
* Get the bean definition for 'autoConfigurationPackages'.
*/
public static BeanDefinition getAutoConfigurationPackagesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(AutoConfigurationPackages.BasePackages.class);
beanDefinition.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
beanDefinition.getConstructorArgumentValues().addIndexedArgumentValue(0, new String[] {"com.example.nativedemo"});
beanDefinition.setInstanceSupplier(getAutoConfigurationPackagesInstanceSupplier());
return beanDefinition;
}
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/AutoConfigurationPackages__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/AutoConfigurationPackages__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 486
} | 156 |
package org.springframework.boot.autoconfigure.jdbc;
import javax.sql.DataSource;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link DataSourceJmxConfiguration}.
*/
public class DataSourceJmxConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'dataSourceJmxConfiguration'.
*/
public static BeanDefinition getDataSourceJmxConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceJmxConfiguration.class);
beanDefinition.setInstanceSupplier(DataSourceJmxConfiguration::new);
return beanDefinition;
}
/**
* Bean definitions for {@link DataSourceJmxConfiguration.Hikari}.
*/
public static class Hikari {
/**
* Get the bean instance supplier for 'org.springframework.boot.autoconfigure.jdbc.DataSourceJmxConfiguration$Hikari'.
*/
private static BeanInstanceSupplier<DataSourceJmxConfiguration.Hikari> getHikariInstanceSupplier(
) {
return BeanInstanceSupplier.<DataSourceJmxConfiguration.Hikari>forConstructor(DataSource.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> new DataSourceJmxConfiguration.Hikari(args.get(0), args.get(1)));
}
/**
* Get the bean definition for 'hikari'.
*/
public static BeanDefinition getHikariBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceJmxConfiguration.Hikari.class);
beanDefinition.setInstanceSupplier(getHikariInstanceSupplier());
return beanDefinition;
}
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceJmxConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceJmxConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 562
} | 157 |
package org.springframework.cloud.autoconfigure;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.cloud.context.environment.EnvironmentManager;
import org.springframework.core.env.ConfigurableEnvironment;
/**
* Bean definitions for {@link LifecycleMvcEndpointAutoConfiguration}.
*/
public class LifecycleMvcEndpointAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'lifecycleMvcEndpointAutoConfiguration'.
*/
public static BeanDefinition getLifecycleMvcEndpointAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(LifecycleMvcEndpointAutoConfiguration.class);
beanDefinition.setInstanceSupplier(LifecycleMvcEndpointAutoConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'environmentManager'.
*/
private static BeanInstanceSupplier<EnvironmentManager> getEnvironmentManagerInstanceSupplier() {
return BeanInstanceSupplier.<EnvironmentManager>forFactoryMethod(LifecycleMvcEndpointAutoConfiguration.class, "environmentManager", ConfigurableEnvironment.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(LifecycleMvcEndpointAutoConfiguration.class).environmentManager(args.get(0)));
}
/**
* Get the bean definition for 'environmentManager'.
*/
public static BeanDefinition getEnvironmentManagerBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(EnvironmentManager.class);
beanDefinition.setInstanceSupplier(getEnvironmentManagerInstanceSupplier());
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/autoconfigure/LifecycleMvcEndpointAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/autoconfigure/LifecycleMvcEndpointAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 483
} | 158 |
package org.springframework.cloud.commons.util;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.InstanceSupplier;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link UtilAutoConfiguration}.
*/
public class UtilAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'utilAutoConfiguration'.
*/
public static BeanDefinition getUtilAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(UtilAutoConfiguration.class);
beanDefinition.setInstanceSupplier(UtilAutoConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'inetUtilsProperties'.
*/
private static BeanInstanceSupplier<InetUtilsProperties> getInetUtilsPropertiesInstanceSupplier(
) {
return BeanInstanceSupplier.<InetUtilsProperties>forFactoryMethod(UtilAutoConfiguration.class, "inetUtilsProperties")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(UtilAutoConfiguration.class).inetUtilsProperties());
}
/**
* Get the bean definition for 'inetUtilsProperties'.
*/
public static BeanDefinition getInetUtilsPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(InetUtilsProperties.class);
InstanceSupplier<InetUtilsProperties> instanceSupplier = getInetUtilsPropertiesInstanceSupplier();
instanceSupplier = instanceSupplier.andThen(InetUtilsProperties__Autowiring::apply);
beanDefinition.setInstanceSupplier(instanceSupplier);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'inetUtils'.
*/
private static BeanInstanceSupplier<InetUtils> getInetUtilsInstanceSupplier() {
return BeanInstanceSupplier.<InetUtils>forFactoryMethod(UtilAutoConfiguration.class, "inetUtils", InetUtilsProperties.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(UtilAutoConfiguration.class).inetUtils(args.get(0)));
}
/**
* Get the bean definition for 'inetUtils'.
*/
public static BeanDefinition getInetUtilsBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(InetUtils.class);
beanDefinition.setDestroyMethodNames("close");
beanDefinition.setInstanceSupplier(getInetUtilsInstanceSupplier());
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/commons/util/UtilAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/commons/util/UtilAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 764
} | 159 |
version: 2.2.1
Removed the default value of NACOS_AUTH_TOKEN
Removed the default value of NACOS_AUTH_IDENTITY_KEY
Removed the default value of NACOS_AUTH_IDENTITY_VALUE | nacos-docker/changlog/0 | {
"file_path": "nacos-docker/changlog",
"repo_id": "nacos-docker",
"token_count": 86
} | 160 |
2024-05-27 10:49:41,149 INFO [dump] add formal task. groupKey=remote.yml+DEFAULT_GROUP+pgvector
2024-05-27 10:49:41,189 INFO [dump] process formal task. groupKey=remote.yml+DEFAULT_GROUP+pgvector
2024-05-27 10:49:41,192 INFO [dump] md5 changed, save to disk cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=83267b8356f558ebb72b0da2ebdde5f0,oldMd5=2401670256689a50be81a932cab6af5d
2024-05-27 10:49:41,192 INFO [dump] md5 changed, update md5 and timestamp in jvm cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=83267b8356f558ebb72b0da2ebdde5f0,oldMd5=2401670256689a50be81a932cab6af5d,lastModifiedTs=1716778181146
| nacos-docker/example/standalone-logs/config-dump.log.2024-05-27.0/0 | {
"file_path": "nacos-docker/example/standalone-logs/config-dump.log.2024-05-27.0",
"repo_id": "nacos-docker",
"token_count": 280
} | 161 |
2024-06-06 14:18:21,599 INFO notifyConnectTimeout:100
2024-06-06 14:18:21,603 INFO notifySocketTimeout:200
2024-06-06 14:18:21,603 INFO isHealthCheck:true
2024-06-06 14:18:21,603 INFO maxHealthCheckFailCount:12
2024-06-06 14:18:21,603 INFO maxContent:10485760
2024-06-06 14:18:25,926 WARN DumpService start
2024-06-06 14:18:25,926 INFO start clear all config-info.
2024-06-06 14:18:25,927 INFO clear all config-info success.
2024-06-06 14:18:25,937 INFO clear all config-info-tenant success.
2024-06-06 14:18:26,002 INFO All dump page size is set to 50 according to mem limit 972 MB
2024-06-06 14:18:26,003 INFO start dump all config-info...
2024-06-06 14:18:26,040 INFO [all-dump] submit all task for 765369249085530112 / 765369249085530112, dbTime=36,diskTime=1
2024-06-06 14:18:26,041 INFO [all-dump] wait 1 dump tasks to be finished
2024-06-06 14:18:27,041 INFO success to dump all config-info。
2024-06-06 14:18:27,042 INFO start clear all config-info-beta.
2024-06-06 14:18:27,042 INFO clear all config-info-beta success.
2024-06-06 14:18:27,042 INFO clear all config-info-beta-tenant success.
2024-06-06 14:18:27,059 INFO start clear all config-info-tag.
2024-06-06 14:18:27,059 INFO clear all config-info-tag success.
2024-06-06 14:18:27,059 INFO clear all config-info-tag-tenant success.
| nacos-docker/example/standalone-logs/config-server.log/0 | {
"file_path": "nacos-docker/example/standalone-logs/config-server.log",
"repo_id": "nacos-docker",
"token_count": 522
} | 162 |
version: "3.8"
services:
nacos:
image: nacos/nacos-server:${NACOS_VERSION}
container_name: nacos-standalone-mysql
env_file:
- ../env/nacos-standlone-mysql.env
volumes:
- ./standalone-logs/:/home/nacos/logs
ports:
- "8848:8848"
- "9848:9848"
depends_on:
mysql:
condition: service_healthy
restart: always
mysql:
container_name: mysql
build:
context: .
dockerfile: ./image/mysql/8/Dockerfile
image: example/mysql:8.0.30
env_file:
- ../env/mysql.env
volumes:
- ./mysql:/var/lib/mysql
ports:
- "3306:3306"
healthcheck:
test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ]
interval: 5s
timeout: 10s
retries: 10
| nacos-docker/example/standalone-mysql-8.yaml/0 | {
"file_path": "nacos-docker/example/standalone-mysql-8.yaml",
"repo_id": "nacos-docker",
"token_count": 384
} | 163 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="SqlDialectMappings">
<file url="file://$PROJECT_DIR$/src/main/kotlin/org/example/pgvector/dao/DatabaseRecordDao.kt" dialect="GenericSQL" />
</component>
</project> | pgvector/.idea/sqldialects.xml/0 | {
"file_path": "pgvector/.idea/sqldialects.xml",
"repo_id": "pgvector",
"token_count": 92
} | 164 |
package org.springframework.boot.autoconfigure.context;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link ConfigurationPropertiesAutoConfiguration}.
*/
@Generated
public class ConfigurationPropertiesAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'configurationPropertiesAutoConfiguration'.
*/
public static BeanDefinition getConfigurationPropertiesAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ConfigurationPropertiesAutoConfiguration.class);
beanDefinition.setInstanceSupplier(ConfigurationPropertiesAutoConfiguration::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/context/ConfigurationPropertiesAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/context/ConfigurationPropertiesAutoConfiguration__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 205
} | 165 |
package org.springframework.boot.autoconfigure.jackson;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link JacksonProperties}.
*/
@Generated
public class JacksonProperties__BeanDefinitions {
/**
* Get the bean definition for 'jacksonProperties'.
*/
public static BeanDefinition getJacksonPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(JacksonProperties.class);
beanDefinition.setInstanceSupplier(JacksonProperties::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/jackson/JacksonProperties__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/jackson/JacksonProperties__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 194
} | 166 |
package org.springframework.boot.autoconfigure.orm.jpa;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link HibernateProperties}.
*/
@Generated
public class HibernateProperties__BeanDefinitions {
/**
* Get the bean definition for 'hibernateProperties'.
*/
public static BeanDefinition getHibernatePropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(HibernateProperties.class);
beanDefinition.setInstanceSupplier(HibernateProperties::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/HibernateProperties__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/HibernateProperties__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 213
} | 167 |
package org.springframework.boot.autoconfigure.web.servlet;
import java.lang.SuppressWarnings;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.ListableBeanFactory;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.InstanceSupplier;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.boot.autoconfigure.web.WebProperties;
import org.springframework.boot.web.servlet.filter.OrderedFormContentFilter;
import org.springframework.context.ApplicationContext;
import org.springframework.format.support.FormattingConversionService;
import org.springframework.validation.Validator;
import org.springframework.web.accept.ContentNegotiationManager;
import org.springframework.web.filter.RequestContextFilter;
import org.springframework.web.servlet.FlashMapManager;
import org.springframework.web.servlet.LocaleResolver;
import org.springframework.web.servlet.ThemeResolver;
import org.springframework.web.servlet.resource.ResourceUrlProvider;
import org.springframework.web.servlet.view.ContentNegotiatingViewResolver;
import org.springframework.web.servlet.view.InternalResourceViewResolver;
/**
* Bean definitions for {@link WebMvcAutoConfiguration}.
*/
@Generated
public class WebMvcAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'webMvcAutoConfiguration'.
*/
public static BeanDefinition getWebMvcAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(WebMvcAutoConfiguration.class);
beanDefinition.setInstanceSupplier(WebMvcAutoConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'formContentFilter'.
*/
private static BeanInstanceSupplier<OrderedFormContentFilter> getFormContentFilterInstanceSupplier(
) {
return BeanInstanceSupplier.<OrderedFormContentFilter>forFactoryMethod(WebMvcAutoConfiguration.class, "formContentFilter")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.class).formContentFilter());
}
/**
* Get the bean definition for 'formContentFilter'.
*/
public static BeanDefinition getFormContentFilterBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(OrderedFormContentFilter.class);
beanDefinition.setInstanceSupplier(getFormContentFilterInstanceSupplier());
return beanDefinition;
}
/**
* Bean definitions for {@link WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter}.
*/
@Generated
public static class WebMvcAutoConfigurationAdapter {
/**
* Get the bean instance supplier for 'org.springframework.boot.autoconfigure.web.servlet.WebMvcAutoConfiguration$WebMvcAutoConfigurationAdapter'.
*/
private static BeanInstanceSupplier<WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter> getWebMvcAutoConfigurationAdapterInstanceSupplier(
) {
return BeanInstanceSupplier.<WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter>forConstructor(WebProperties.class, WebMvcProperties.class, ListableBeanFactory.class, ObjectProvider.class, ObjectProvider.class, ObjectProvider.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> new WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter(args.get(0), args.get(1), args.get(2), args.get(3), args.get(4), args.get(5), args.get(6)));
}
/**
* Get the bean definition for 'webMvcAutoConfigurationAdapter'.
*/
public static BeanDefinition getWebMvcAutoConfigurationAdapterBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter.class);
beanDefinition.setInstanceSupplier(getWebMvcAutoConfigurationAdapterInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'defaultViewResolver'.
*/
private static BeanInstanceSupplier<InternalResourceViewResolver> getDefaultViewResolverInstanceSupplier(
) {
return BeanInstanceSupplier.<InternalResourceViewResolver>forFactoryMethod(WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter.class, "defaultViewResolver")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter.class).defaultViewResolver());
}
/**
* Get the bean definition for 'defaultViewResolver'.
*/
public static BeanDefinition getDefaultViewResolverBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(InternalResourceViewResolver.class);
beanDefinition.setInstanceSupplier(getDefaultViewResolverInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'viewResolver'.
*/
private static BeanInstanceSupplier<ContentNegotiatingViewResolver> getViewResolverInstanceSupplier(
) {
return BeanInstanceSupplier.<ContentNegotiatingViewResolver>forFactoryMethod(WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter.class, "viewResolver", BeanFactory.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter.class).viewResolver(args.get(0)));
}
/**
* Get the bean definition for 'viewResolver'.
*/
public static BeanDefinition getViewResolverBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ContentNegotiatingViewResolver.class);
beanDefinition.setInstanceSupplier(getViewResolverInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean definition for 'requestContextFilter'.
*/
public static BeanDefinition getRequestContextFilterBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter.class);
beanDefinition.setTargetType(RequestContextFilter.class);
beanDefinition.setInstanceSupplier(BeanInstanceSupplier.<RequestContextFilter>forFactoryMethod(WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter.class, "requestContextFilter").withGenerator((registeredBean) -> WebMvcAutoConfiguration.WebMvcAutoConfigurationAdapter.requestContextFilter()));
return beanDefinition;
}
}
/**
* Bean definitions for {@link WebMvcAutoConfiguration.EnableWebMvcConfiguration}.
*/
@Generated
public static class EnableWebMvcConfiguration {
/**
* Get the bean instance supplier for 'org.springframework.boot.autoconfigure.web.servlet.WebMvcAutoConfiguration$EnableWebMvcConfiguration'.
*/
private static BeanInstanceSupplier<WebMvcAutoConfiguration.EnableWebMvcConfiguration> getEnableWebMvcConfigurationInstanceSupplier(
) {
return BeanInstanceSupplier.<WebMvcAutoConfiguration.EnableWebMvcConfiguration>forConstructor(WebMvcProperties.class, WebProperties.class, ObjectProvider.class, ObjectProvider.class, ListableBeanFactory.class)
.withGenerator((registeredBean, args) -> new WebMvcAutoConfiguration.EnableWebMvcConfiguration(args.get(0), args.get(1), args.get(2), args.get(3), args.get(4)));
}
/**
* Get the bean definition for 'enableWebMvcConfiguration'.
*/
public static BeanDefinition getEnableWebMvcConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class);
InstanceSupplier<WebMvcAutoConfiguration.EnableWebMvcConfiguration> instanceSupplier = getEnableWebMvcConfigurationInstanceSupplier();
instanceSupplier = instanceSupplier.andThen(WebMvcAutoConfiguration_EnableWebMvcConfiguration__Autowiring::apply);
beanDefinition.setInstanceSupplier(instanceSupplier);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'welcomePageHandlerMapping'.
*/
private static BeanInstanceSupplier<WelcomePageHandlerMapping> getWelcomePageHandlerMappingInstanceSupplier(
) {
return BeanInstanceSupplier.<WelcomePageHandlerMapping>forFactoryMethod(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class, "welcomePageHandlerMapping", ApplicationContext.class, FormattingConversionService.class, ResourceUrlProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class).welcomePageHandlerMapping(args.get(0), args.get(1), args.get(2)));
}
/**
* Get the bean definition for 'welcomePageHandlerMapping'.
*/
public static BeanDefinition getWelcomePageHandlerMappingBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(WelcomePageHandlerMapping.class);
beanDefinition.setInstanceSupplier(getWelcomePageHandlerMappingInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'welcomePageNotAcceptableHandlerMapping'.
*/
private static BeanInstanceSupplier<WelcomePageNotAcceptableHandlerMapping> getWelcomePageNotAcceptableHandlerMappingInstanceSupplier(
) {
return BeanInstanceSupplier.<WelcomePageNotAcceptableHandlerMapping>forFactoryMethod(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class, "welcomePageNotAcceptableHandlerMapping", ApplicationContext.class, FormattingConversionService.class, ResourceUrlProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class).welcomePageNotAcceptableHandlerMapping(args.get(0), args.get(1), args.get(2)));
}
/**
* Get the bean definition for 'welcomePageNotAcceptableHandlerMapping'.
*/
public static BeanDefinition getWelcomePageNotAcceptableHandlerMappingBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(WelcomePageNotAcceptableHandlerMapping.class);
beanDefinition.setInstanceSupplier(getWelcomePageNotAcceptableHandlerMappingInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'localeResolver'.
*/
private static BeanInstanceSupplier<LocaleResolver> getLocaleResolverInstanceSupplier() {
return BeanInstanceSupplier.<LocaleResolver>forFactoryMethod(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class, "localeResolver")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class).localeResolver());
}
/**
* Get the bean definition for 'localeResolver'.
*/
public static BeanDefinition getLocaleResolverBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(LocaleResolver.class);
beanDefinition.setInstanceSupplier(getLocaleResolverInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'themeResolver'.
*/
@SuppressWarnings("deprecation")
private static BeanInstanceSupplier<ThemeResolver> getThemeResolverInstanceSupplier() {
return BeanInstanceSupplier.<ThemeResolver>forFactoryMethod(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class, "themeResolver")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class).themeResolver());
}
/**
* Get the bean definition for 'themeResolver'.
*/
@SuppressWarnings("deprecation")
public static BeanDefinition getThemeResolverBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ThemeResolver.class);
beanDefinition.setInstanceSupplier(getThemeResolverInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'flashMapManager'.
*/
private static BeanInstanceSupplier<FlashMapManager> getFlashMapManagerInstanceSupplier() {
return BeanInstanceSupplier.<FlashMapManager>forFactoryMethod(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class, "flashMapManager")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class).flashMapManager());
}
/**
* Get the bean definition for 'flashMapManager'.
*/
public static BeanDefinition getFlashMapManagerBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(FlashMapManager.class);
beanDefinition.setInstanceSupplier(getFlashMapManagerInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'mvcConversionService'.
*/
private static BeanInstanceSupplier<FormattingConversionService> getMvcConversionServiceInstanceSupplier(
) {
return BeanInstanceSupplier.<FormattingConversionService>forFactoryMethod(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class, "mvcConversionService")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class).mvcConversionService());
}
/**
* Get the bean definition for 'mvcConversionService'.
*/
public static BeanDefinition getMvcConversionServiceBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(FormattingConversionService.class);
beanDefinition.setInstanceSupplier(getMvcConversionServiceInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'mvcValidator'.
*/
private static BeanInstanceSupplier<Validator> getMvcValidatorInstanceSupplier() {
return BeanInstanceSupplier.<Validator>forFactoryMethod(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class, "mvcValidator")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class).mvcValidator());
}
/**
* Get the bean definition for 'mvcValidator'.
*/
public static BeanDefinition getMvcValidatorBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(Validator.class);
beanDefinition.setInstanceSupplier(getMvcValidatorInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'mvcContentNegotiationManager'.
*/
private static BeanInstanceSupplier<ContentNegotiationManager> getMvcContentNegotiationManagerInstanceSupplier(
) {
return BeanInstanceSupplier.<ContentNegotiationManager>forFactoryMethod(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class, "mvcContentNegotiationManager")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(WebMvcAutoConfiguration.EnableWebMvcConfiguration.class).mvcContentNegotiationManager());
}
/**
* Get the bean definition for 'mvcContentNegotiationManager'.
*/
public static BeanDefinition getMvcContentNegotiationManagerBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ContentNegotiationManager.class);
beanDefinition.setInstanceSupplier(getMvcContentNegotiationManagerInstanceSupplier());
return beanDefinition;
}
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/web/servlet/WebMvcAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/web/servlet/WebMvcAutoConfiguration__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 4753
} | 168 |
package org.springframework.data.jpa.repository.support;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link EntityManagerBeanDefinitionRegistrarPostProcessor}.
*/
@Generated
public class EntityManagerBeanDefinitionRegistrarPostProcessor__BeanDefinitions {
/**
* Get the bean definition for 'emBeanDefinitionRegistrarPostProcessor'.
*/
public static BeanDefinition getEmBeanDefinitionRegistrarPostProcessorBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(EntityManagerBeanDefinitionRegistrarPostProcessor.class);
beanDefinition.setLazyInit(true);
beanDefinition.setInstanceSupplier(EntityManagerBeanDefinitionRegistrarPostProcessor::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/data/jpa/repository/support/EntityManagerBeanDefinitionRegistrarPostProcessor__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/data/jpa/repository/support/EntityManagerBeanDefinitionRegistrarPostProcessor__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 255
} | 169 |
recursive-include swift/utils *.py
recursive-include swift/llm/data *.*
recursive-include swift/llm/ds_config *.json
recursive-include requirements *.txt
recursive-include swift/llm/agent *.json
| swift/MANIFEST.in/0 | {
"file_path": "swift/MANIFEST.in",
"repo_id": "swift",
"token_count": 59
} | 170 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/business_ethics/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/business_ethics/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 171 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/high_school_chemistry/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/high_school_chemistry/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 172 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/jurisprudence/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/jurisprudence/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 173 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/public_relations/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/public_relations/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 174 |
# Basic Usage
Tuners are additional structures attached to a model to reduce the number of trainable parameters or to improve training accuracy. The tuners currently supported by SWIFT include:
1. LoRA: [LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/abs/2106.09685)
2. LoRA+: [LoRA+: Efficient Low Rank Adaptation of Large Models](https://arxiv.org/pdf/2402.12354.pdf)
3. LLaMA PRO: [LLAMA PRO: Progressive LLaMA with Block Expansion](https://arxiv.org/pdf/2401.02415.pdf)
4. GaLore: [GaLore: Memory-Efficient LLM Training by Gradient Low-Rank Projection](https://arxiv.org/abs/2403.03507)
5. LISA: [LISA: Layerwise Importance Sampling for Memory-Efficient Large Language Model Fine-Tuning](https://arxiv.org/abs/2403.17919)
6. UnSloth: https://github.com/unslothai/unsloth
7. SCEdit: [SCEdit: Efficient and Controllable Image Diffusion Generation via Skip Connection Editing](https://arxiv.org/abs/2312.11392) < [arXiv](https://arxiv.org/abs/2312.11392) | [Project Page](https://scedit.github.io/) >
8. NEFTune: [Noisy Embeddings Improve Instruction Finetuning](https://arxiv.org/abs/2310.05914)
9. LongLoRA: [Efficient Fine-tuning of Long-Context Large Language Models](https://arxiv.org/abs/2309.12307)
10. Adapter: [Parameter-Efficient Transfer Learning for NLP](http://arxiv.org/abs/1902.00751)
11. Vision Prompt Tuning: [Visual Prompt Tuning](https://arxiv.org/abs/2203.12119)
12. Side: [Side-Tuning: A Baseline for Network Adaptation via Additive Side Networks](https://arxiv.org/abs/1912.13503)
13. Res-Tuning: [Res-Tuning: A Flexible and Efficient Tuning Paradigm via Unbinding Tuner from Backbone](https://arxiv.org/abs/2310.19859) < [arXiv](https://arxiv.org/abs/2310.19859) | [Project Page](https://res-tuning.github.io/) | [Usage](ResTuning.md) >
14. Tuners provided by [PEFT](https://github.com/huggingface/peft), such as IA3, AdaLoRA, etc.
## Using Tuners in Training
Call `Swift.prepare_model()` to attach tuners to a model:
```python
from modelscope import Model
from swift import Swift, LoraConfig
import torch
model = Model.from_pretrained('ZhipuAI/chatglm3-6b', torch_dtype=torch.bfloat16, device_map='auto')
lora_config = LoraConfig(
r=16,
target_modules=['query_key_value'],
lora_alpha=32,
lora_dropout=0.)
model = Swift.prepare_model(model, lora_config)
```
You can also use multiple tuners at the same time:
```python
from modelscope import Model
from swift import Swift, LoraConfig, AdapterConfig
import torch
model = Model.from_pretrained('ZhipuAI/chatglm3-6b', torch_dtype=torch.bfloat16, device_map='auto')
lora_config = LoraConfig(
r=16,
target_modules=['query_key_value'],
lora_alpha=32,
lora_dropout=0.)
adapter_config = AdapterConfig(
dim=model.config.hidden_size,
target_modules=['mlp'],
method_name='forward',
hidden_pos=0,
adapter_length=32,
)
model = Swift.prepare_model(model, {'first_tuner': lora_config, 'second_tuner': adapter_config})
# use model to do other things
```
When using multiple tuners, the second argument must be a Dict whose keys are the tuner names and whose values are the tuner configs.
After training you can call:
```python
model.save_pretrained(save_directory='./output')
```
to save the model checkpoint. The checkpoint files only contain the weights of the tuners, not the weights of the model itself. The saved structure looks like this:
> outputs
>
> |-- configuration.json
>
> |-- first_tuner
>
> |-- adapter_config.json
>
> |-- adapter_model.bin
>
> |-- second_tuner
>
> |-- adapter_config.json
>
> |-- adapter_model.bin
>
> |-- ...
If only a single config is passed in, the default name `default` is used:
> outputs
>
> |-- configuration.json
>
> |-- default
>
> |-- adapter_config.json
>
> |-- adapter_model.bin
>
> |-- ...
### Complete Training Code
```python
# A100 18G memory
from swift import Seq2SeqTrainer, Seq2SeqTrainingArguments
from modelscope import MsDataset, AutoTokenizer
from modelscope import AutoModelForCausalLM
from swift import Swift, LoraConfig
from swift.llm import get_template, TemplateType
import torch
# load the model
model = AutoModelForCausalLM.from_pretrained('ZhipuAI/chatglm3-6b', torch_dtype=torch.bfloat16, device_map='auto', trust_remote_code=True)
lora_config = LoraConfig(
r=16,
target_modules=['query_key_value'],
lora_alpha=32,
lora_dropout=0.05)
model = Swift.prepare_model(model, lora_config)
tokenizer = AutoTokenizer.from_pretrained('ZhipuAI/chatglm3-6b', trust_remote_code=True)
dataset = MsDataset.load('AI-ModelScope/alpaca-gpt4-data-en', split='train')
template = get_template(TemplateType.chatglm3, tokenizer, max_length=1024)
def encode(example):
inst, inp, output = example['instruction'], example.get('input', None), example['output']
if output is None:
return {}
if inp is None or len(inp) == 0:
q = inst
else:
q = f'{inst}\n{inp}'
example, kwargs = template.encode({'query': q, 'response': output})
return example
dataset = dataset.map(encode).filter(lambda e: e.get('input_ids'))
dataset = dataset.train_test_split(test_size=0.001)
train_dataset, val_dataset = dataset['train'], dataset['test']
train_args = Seq2SeqTrainingArguments(
output_dir='output',
learning_rate=1e-4,
num_train_epochs=2,
eval_steps=500,
save_steps=500,
evaluation_strategy='steps',
save_strategy='steps',
dataloader_num_workers=4,
per_device_train_batch_size=1,
gradient_accumulation_steps=16,
logging_steps=10,
)
trainer = Seq2SeqTrainer(
model=model,
args=train_args,
data_collator=template.data_collator,
train_dataset=train_dataset,
eval_dataset=val_dataset,
tokenizer=tokenizer)
trainer.train()
```
## Using Tuners at Inference Time
Use `Swift.from_pretrained()` to load the checkpoint saved after training:
```python
from modelscope import Model
from swift import Swift
import torch
model = Model.from_pretrained('ZhipuAI/chatglm2-6b', torch_dtype=torch.bfloat16, device_map='auto')
model = Swift.from_pretrained(model, './output')
```
### Complete Inference Code
```python
# A100 14G memory
import torch
from modelscope import AutoModelForCausalLM, GenerationConfig
from modelscope import AutoTokenizer
from swift import Swift
from swift.llm import get_template, TemplateType, to_device
# load the model
model = AutoModelForCausalLM.from_pretrained('ZhipuAI/chatglm3-6b', torch_dtype=torch.bfloat16,
device_map='auto', trust_remote_code=True)
model = Swift.from_pretrained(model, 'output/checkpoint-xxx')
tokenizer = AutoTokenizer.from_pretrained('ZhipuAI/chatglm3-6b', trust_remote_code=True)
template = get_template(TemplateType.chatglm3, tokenizer, max_length=1024)
examples, tokenizer_kwargs = template.encode({'query': 'How are you?'})
if 'input_ids' in examples:
input_ids = torch.tensor(examples['input_ids'])[None]
examples['input_ids'] = input_ids
token_len = input_ids.shape[1]
generation_config = GenerationConfig(
max_new_tokens=1024,
temperature=0.3,
top_k=25,
top_p=0.8,
do_sample=True,
repetition_penalty=1.0,
num_beams=10,
pad_token_id=tokenizer.pad_token_id,
eos_token_id=tokenizer.eos_token_id)
device = next(model.parameters()).device
examples = to_device(examples, device)
generate_ids = model.generate(
generation_config=generation_config,
**examples)
generate_ids = template.get_generate_ids(generate_ids, token_len)
print(tokenizer.decode(generate_ids, **tokenizer_kwargs))
# I'm an AI language model, so I don't have feelings or physical sensations. However, I'm here to assist you with any questions or tasks you may have. How can I help you today?
```
# Interface List
## Static Interfaces of the Swift Class
- `Swift.prepare_model(model, config, **kwargs)`
  - Purpose: attach a tuner to a model. If the config is a subclass of PeftConfig, the corresponding interface of the Peft library is used to load the tuner. When a SwiftConfig is used, this interface can also accept a SwiftModel instance and be called repeatedly, which has the same effect as passing a dict of configs.
  - This interface supports loading multiple tuners of different types in parallel for joint use.
  - Arguments:
    - `model`: an instance of `torch.nn.Module` or `SwiftModel`, the model to be wrapped
    - `config`: an instance of `SwiftConfig` or `PeftConfig`, or a dict mapping custom tuner names to configs
  - Return value: an instance of `SwiftModel` or `PeftModel`
- `Swift.merge_and_unload(model)`
  - Purpose: merge the LoRA weights back into the base model and completely unload the LoRA parts
  - Arguments:
    - model: an instance of `SwiftModel` or `PeftModel` with LoRA loaded
  - Return value: None
- `Swift.merge(model)`
  - Purpose: merge the LoRA weights back into the base model without unloading the LoRA parts
  - Arguments:
    - model: an instance of `SwiftModel` or `PeftModel` with LoRA loaded
  - Return value: None
- `Swift.unmerge(model)`
  - Purpose: split the LoRA weights back out of the base model weights into the LoRA structure
  - Arguments:
    - model: an instance of `SwiftModel` or `PeftModel` with LoRA loaded
  - Return value: None
- `Swift.save_to_peft_format(ckpt_dir, output_dir)`
  - Purpose: convert a stored LoRA checkpoint into a Peft-compatible format. The main changes are:
    - `default` is moved out of its `default` folder into the root of output_dir
    - the `{tuner_name}.` field is removed from the weight keys, e.g. `model.layer.0.self.in_proj.lora_A.default.weight` becomes `model.layer.0.self.in_proj.lora_A.weight`
    - the weight keys gain a `basemodel.model` prefix
  - Note: only LoRA can be converted; other tuner types are not supported by Peft itself and will raise a conversion error. In addition, since LoRAConfig contains extra parameters such as `dtype`, conversion to the Peft format is not supported when those parameters are set; in that case you can manually remove the corresponding fields from adapter_config.json
  - Arguments:
    - ckpt_dir: the source weights directory
    - output_dir: the target weights directory
  - Return value: None
- `Swift.from_pretrained(model, model_id, adapter_name, revision, **kwargs)`
  - Purpose: load tuners from a stored weights directory onto the model. If adapter_name is not passed, all tuners under the model_id directory are loaded. Like `prepare_model`, this interface can be called repeatedly (see the usage sketch after this list).
  - Arguments:
    - model: an instance of `torch.nn.Module` or `SwiftModel`, the model to be wrapped
    - model_id: a `str`, the tuner checkpoint to load; it can be a ModelScope hub id or a local directory produced by training
    - adapter_name: `str`, `List[str]`, `Dict[str, str]` or `None`; the tuner name(s) in the tuner directory to load. If `None`, all tuners are loaded; if `str` or `List[str]`, only the specified tuners are loaded; if a `Dict`, the tuner referred to by the key is loaded and renamed to the value
    - revision: if model_id is a ModelScope id, revision can specify the corresponding version number
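A minimal usage sketch of these static interfaces is shown below; the checkpoint directory `./output` and the target directory `./output-peft` are placeholders and should point at a checkpoint produced by `save_pretrained`:
```python
from modelscope import Model
from swift import Swift
import torch

model = Model.from_pretrained('ZhipuAI/chatglm3-6b', torch_dtype=torch.bfloat16, device_map='auto')
# Load every tuner stored under './output' (pass adapter_name to load only a subset)
model = Swift.from_pretrained(model, './output')
# Merge the LoRA weights into the base weights and unload the LoRA modules
Swift.merge_and_unload(model)
# Convert the stored LoRA checkpoint into a Peft-compatible layout ('./output-peft' is a placeholder)
Swift.save_to_peft_format('./output', './output-peft')
```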
## SwiftModel Interfaces
The interfaces that users are likely to call are listed below; other internal or deprecated interfaces can be found in the API Doc generated by the `make docs` command. A usage sketch follows this list.
- `SwiftModel.create_optimizer_param_groups(self, **defaults)`
  - Purpose: create parameter groups according to the loaded tuners; currently this only takes effect for the `LoRA+` algorithm
  - Arguments:
    - defaults: default values for the `optimizer_groups`, such as `lr` and `weight_decay`
  - Return value:
    - the created `optimizer_groups`
- `SwiftModel.add_weighted_adapter(self, ...)`
  - Purpose: merge existing LoRA tuners into a single one
  - Arguments:
    - This interface is a pass-through of PeftModel.add_weighted_adapter; see the [add_weighted_adapter documentation](https://huggingface.co/docs/peft/main/en/package_reference/lora#peft.LoraModel.add_weighted_adapter) for the parameters
- `SwiftModel.save_pretrained(self, save_directory, safe_serialization, adapter_name)`
  - Purpose: save the tuner weights
  - Arguments:
    - save_directory: the output directory
    - safe_serialization: whether to use safetensors, False by default
    - adapter_name: the adapter tuner(s) to save; if not passed, all tuners are saved by default
- `SwiftModel.set_active_adapters(self, adapter_names, offload=None)`
  - Purpose: set the currently active adapters; adapters not in the list are deactivated
  - During `inference` the environment variable `USE_UNIQUE_THREAD=0/1` is supported, with default `1`. If it is `0`, set_active_adapters only takes effect for the current thread; each thread then uses the tuners it has activated, and tuners in different threads do not interfere with each other
  - Arguments:
    - adapter_names: the tuners to activate
    - offload: how to handle the deactivated adapters. The default `None` keeps them in GPU memory; `cpu` and `meta` are also supported, offloading them to the cpu or meta device to reduce GPU memory usage. When `USE_UNIQUE_THREAD=0`, do not pass offload, to avoid affecting other threads
  - Return value: None
- `SwiftModel.activate_adapter(self, adapter_name)`
  - Purpose: activate a tuner
  - During `inference` the environment variable `USE_UNIQUE_THREAD=0/1` is supported, with default `1`. If it is `0`, activate_adapter only takes effect for the current thread; each thread then uses the tuners it has activated, and tuners in different threads do not interfere with each other
  - Arguments:
    - adapter_name: the name of the tuner to activate
  - Return value: None
- `SwiftModel.deactivate_adapter(self, adapter_name, offload)`
  - Purpose: deactivate a tuner
  - Do not call this interface during `inference` when the environment variable `USE_UNIQUE_THREAD=0`
  - Arguments:
    - adapter_name: the name of the tuner to deactivate
    - offload: how to handle the deactivated adapters. The default `None` keeps them in GPU memory; `cpu` and `meta` are also supported, offloading them to the cpu or meta device to reduce GPU memory usage
  - Return value: None
- `SwiftModel.get_trainable_parameters(self)`
  - Purpose: return information about the trainable parameters
  - Arguments: none
  - Return value: information about the trainable parameters, in the following format:
```text
trainable params: 100M || all params: 1000M || trainable%: 10.00% || cuda memory: 10GiB.
```
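A minimal sketch of these instance interfaces is shown below, assuming two LoRA tuners named `first_tuner` and `second_tuner`; the tuner names, configs, and output directory are placeholders:
```python
from modelscope import Model
from swift import Swift, LoraConfig
import torch

model = Model.from_pretrained('ZhipuAI/chatglm3-6b', torch_dtype=torch.bfloat16, device_map='auto')
model = Swift.prepare_model(model, {
    'first_tuner': LoraConfig(r=16, target_modules=['query_key_value']),
    'second_tuner': LoraConfig(r=8, target_modules=['query_key_value']),
})

# Print the trainable-parameter summary in the format shown above
print(model.get_trainable_parameters())
# Keep only 'first_tuner' active; the deactivated adapters are offloaded to CPU memory
model.set_active_adapters(['first_tuner'], offload='cpu')
# Activate 'second_tuner' again later
model.activate_adapter('second_tuner')
# Save only the tuner weights (one sub-directory per tuner, as described earlier)
model.save_pretrained(save_directory='./output')
```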
| swift/docs/source/GetStarted/使用tuners.md/0 | {
"file_path": "swift/docs/source/GetStarted/使用tuners.md",
"repo_id": "swift",
"token_count": 7108
} | 175 |
# Qwen1.5 Full-Pipeline Best Practice
This document covers inference, self-cognition fine-tuning, quantization, and deployment for **Qwen1.5-7B-Chat** and **Qwen1.5-72B-Chat**, corresponding to **low-resource and high-resource** environments respectively.
For a best practice on self-cognition fine-tuning, inference, and deployment of **Qwen2-72B-Instruct** with two 80GiB A100 GPUs, see [here](https://github.com/modelscope/swift/issues/1092).
## Table of Contents
- [Environment Preparation](#environment-preparation)
- [Qwen1.5-7B-Chat](#qwen15-7b-chat)
  - [Inference](#inference)
  - [Self-Cognition Fine-tuning](#self-cognition-fine-tuning)
  - [Inference After Fine-tuning](#inference-after-fine-tuning)
  - [Quantization](#quantization)
  - [Deployment](#deployment)
- [Qwen1.5-72B-Chat](#qwen15-72b-chat)
  - [Inference](#inference-1)
  - [Self-Cognition Fine-tuning](#self-cognition-fine-tuning-1)
  - [Inference After Fine-tuning](#inference-after-fine-tuning-1)
  - [Quantization](#quantization-1)
  - [Deployment](#deployment-1)
## Environment Preparation
```shell
pip install 'ms-swift[llm]' -U
# The autoawq version must match your CUDA version; choose it according to `https://github.com/casper-hansen/AutoAWQ`
pip install autoawq
# The vllm version must match your CUDA version; choose it according to `https://docs.vllm.ai/en/latest/getting_started/installation.html`
pip install vllm
pip install openai
```
## Qwen1.5-7B-Chat
### Inference
Here we run **streaming** inference on Qwen1.5-7B-Chat and its **awq-int4 quantized** version, and also show inference through the **web UI**.
Inference on `qwen1half-7b-chat` using Python:
```python
# Experimental environment: 3090
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
get_model_tokenizer, get_template, inference, ModelType,
get_default_template_type, inference_stream
)
from swift.utils import seed_everything
import torch
model_type = ModelType.qwen1half_7b_chat
template_type = get_default_template_type(model_type)
print(f'template_type: {template_type}') # template_type: qwen
kwargs = {}
# kwargs['use_flash_attn'] = True  # use flash_attn
model, tokenizer = get_model_tokenizer(model_type, torch.float16,
model_kwargs={'device_map': 'auto'}, **kwargs)
# modify max_new_tokens
model.generation_config.max_new_tokens = 128
template = get_template(template_type, tokenizer)
seed_everything(42)
query = '浙江的省会在哪里?'
response, history = inference(model, template, query)
print(f'query: {query}')
print(f'response: {response}')
# streaming
query = '这有什么好吃的?'
gen = inference_stream(model, template, query, history)
print_idx = 0
print(f'query: {query}\nresponse: ', end='')
for response, history in gen:
delta = response[print_idx:]
print(delta, end='', flush=True)
print_idx = len(response)
print()
print(f'history: {history}')
"""
[INFO:swift] model.max_model_len: 32768
[INFO:swift] Global seed set to 42
query: 浙江的省会在哪里?
response: 浙江省的省会是杭州市。
query: 这有什么好吃的?
response: 浙江有很多美食,比如杭州的西湖醋鱼、东坡肉、龙井虾仁,宁波的汤圆、奉化芋头羹,温州的鱼饼、楠溪江豆腐干,嘉兴的南湖菱角等等。每一道菜都有其独特的风味和历史背景,值得一试。
history: [['浙江的省会在哪里?', '浙江省的省会是杭州市。'], ['这有什么好吃的?', '浙江有很多美食,比如杭州的西湖醋鱼、东坡肉、龙井虾仁,宁波的汤圆、奉化芋头羹,温州的鱼饼、楠溪江豆腐干,嘉兴的南湖菱角等等。每一道菜都有其独特的风味和历史背景,值得一试。']]
"""
```
Inference on `qwen1half-7b-chat-awq` using Python, with **VLLM** for inference acceleration:
```python
# Experimental environment: 3090
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
ModelType, get_vllm_engine, get_default_template_type,
get_template, inference_vllm, inference_stream_vllm
)
import torch
model_type = ModelType.qwen1half_7b_chat_awq
llm_engine = get_vllm_engine(model_type, torch.float16, max_model_len=4096)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
# interface similar to `transformers.GenerationConfig`
llm_engine.generation_config.max_new_tokens = 512
request_list = [{'query': '你好!'}, {'query': '浙江的省会在哪?'}]
resp_list = inference_vllm(llm_engine, template, request_list)
for request, resp in zip(request_list, resp_list):
print(f"query: {request['query']}")
print(f"response: {resp['response']}")
# streaming
history1 = resp_list[1]['history']
query = '这有什么好吃的'
request_list = [{'query': query, 'history': history1}]
gen = inference_stream_vllm(llm_engine, template, request_list)
print_idx = 0
print(f'query: {query}\nresponse: ', end='')
for resp_list in gen:
request = request_list[0]
resp = resp_list[0]
response = resp['response']
delta = response[print_idx:]
print(delta, end='', flush=True)
print_idx = len(response)
print()
print(f"history: {resp_list[0]['history']}")
"""
query: 你好!
response: 你好!有什么问题我可以帮助你吗?
query: 浙江的省会在哪?
response: 浙江省的省会是杭州市。
query: 这有什么好吃的
response: 浙江有很多美食,以下列举一些具有代表性的:
1. 杭州菜:杭州作为浙江的省会,以其精致细腻、注重原汁原味而闻名,如西湖醋鱼、龙井虾仁、叫化童鸡等都是特色菜品。
2. 宁波汤圆:宁波的汤圆皮薄馅大,甜而不腻,尤其是冬至和元宵节时,当地人会吃宁波汤圆庆祝。
3. 温州鱼丸:温州鱼丸选用新鲜鱼类制作,口感弹滑,味道鲜美,常常配以海鲜煮食。
4. 嘉兴粽子:嘉兴粽子以其独特的三角形和咸甜两种口味著名,特别是五芳斋的粽子非常有名。
5. 金华火腿:金华火腿是中国著名的腌制肉类,肉质紧实,香味浓郁,常作为节日礼品。
6. 衢州烂柯山豆腐干:衢州豆腐干质地细腻,味道鲜美,是浙江的传统小吃。
7. 舟山海鲜:浙江沿海地带的舟山有丰富的海鲜资源,如梭子蟹、带鱼、乌贼等,新鲜美味。
以上只是部分浙江美食,浙江各地还有许多特色小吃,你可以根据自己的口味去尝试。
history: [('浙江的省会在哪?', '浙江省的省会是杭州市。'), ('这有什么好吃的', '浙江有很多美食,以下列举一些具有代表性的:\n\n1. 杭州菜:杭州作为浙江的省会,以其精致细腻、注重原汁原味而闻名,如西湖醋鱼、龙井虾仁、叫化童鸡等都是特色菜品。\n\n2. 宁波汤圆:宁波的汤圆皮薄馅大,甜而不腻,尤其是冬至和元宵节时,当地人会吃宁波汤圆庆祝。\n\n3. 温州鱼丸:温州鱼丸选用新鲜鱼类制作,口感弹滑,味道鲜美,常常配以海鲜煮食。\n\n4. 嘉兴粽子:嘉兴粽子以其独特的三角形和咸甜两种口味著名,特别是五芳斋的粽子非常有名。\n\n5. 金华火腿:金华火腿是中国著名的腌制肉类,肉质紧实,香味浓郁,常作为节日礼品。\n\n6. 衢州烂柯山豆腐干:衢州豆腐干质地细腻,味道鲜美,是浙江的传统小吃。\n\n7. 舟山海鲜:浙江沿海地带的舟山有丰富的海鲜资源,如梭子蟹、带鱼、乌贼等,新鲜美味。\n\n以上只是部分浙江美食,浙江各地还有许多特色小吃,你可以根据自己的口味去尝试。')]
"""
```
Inference through the web UI, using VLLM:
```shell
CUDA_VISIBLE_DEVICES=0 swift app-ui \
--model_type qwen1half-7b-chat \
--infer_backend vllm --max_model_len 4096
```
The result is as follows:

### Self-Cognition Fine-tuning
Next we run self-cognition fine-tuning and train your own large model in **ten minutes**. For example, we want the model to think of itself as "Xiao Huang" rather than "Tongyi Qianwen", and as trained by "ModelScope" rather than "Alibaba Cloud".
Using Python:
```python
# Experimental environment: 3090
# 24GB GPU memory
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import DatasetName, ModelType, SftArguments, sft_main
sft_args = SftArguments(
model_type=ModelType.qwen1half_7b_chat,
dataset=[f'{DatasetName.alpaca_zh}#500', f'{DatasetName.alpaca_en}#500',
f'{DatasetName.self_cognition}#500'],
logging_steps=5,
max_length=2048,
learning_rate=1e-4,
output_dir='output',
lora_target_modules=['ALL'],
model_name=['小黄', 'Xiao Huang'],
model_author=['魔搭', 'ModelScope'])
output = sft_main(sft_args)
best_model_checkpoint = output['best_model_checkpoint']
print(f'best_model_checkpoint: {best_model_checkpoint}')
```
If you want to train on a 3090 machine, you can **reduce max_length** to 1024, use model parallelism, or use deepspeed-zero3.
Using model parallelism:
```shell
# Experimental environment: 2 * 3090
# 2 * 18GB GPU memory
CUDA_VISIBLE_DEVICES=0,1 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset alpaca-zh#500 alpaca-en#500 self-cognition#500 \
--logging_steps 5 \
--max_length 2048 \
--learning_rate 1e-4 \
--output_dir output \
--lora_target_modules ALL \
--model_name 小黄 'Xiao Huang' \
--model_author 魔搭 ModelScope \
```
Script for distributed training with **zero2**:
```shell
# Experimental environment: 4 * 3090
# 4 * 24GB GPU memory
CUDA_VISIBLE_DEVICES=0,1,2,3 \
NPROC_PER_NODE=4 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset alpaca-zh#500 alpaca-en#500 self-cognition#500 \
--logging_steps 5 \
--max_length 2048 \
--learning_rate 1e-4 \
--output_dir output \
--lora_target_modules ALL \
--model_name 小黄 'Xiao Huang' \
--model_author 魔搭 ModelScope \
--deepspeed default-zero2 \
```
If you want to **train through the web UI**, run the following command and fill in the corresponding values:
```shell
swift web-ui
```

### Inference After Fine-tuning
Next we verify the effect of the model after fine-tuning.
Inference using Python:
```python
# Experimental environment: 3090
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
get_model_tokenizer, get_template, inference, ModelType, get_default_template_type,
)
from swift.utils import seed_everything
from swift.tuners import Swift
seed_everything(42)
ckpt_dir = 'output/qwen1half-7b-chat/vx-xxx/checkpoint-xxx'
model_type = ModelType.qwen1half_7b_chat
template_type = get_default_template_type(model_type)
model, tokenizer = get_model_tokenizer(model_type, model_kwargs={'device_map': 'auto'})
model.generation_config.max_new_tokens = 128
model = Swift.from_pretrained(model, ckpt_dir, inference_mode=True)
template = get_template(template_type, tokenizer)
query = '你是qwen吗?'
response, history = inference(model, template, query)
print(f'response: {response}')
print(f'history: {history}')
"""
[INFO:swift] model.max_model_len: 32768
response: 不是,我是魔搭的人工智能助手小黄。有什么我可以帮助你的吗?
history: [('你是qwen吗?', '不是,我是魔搭的人工智能助手小黄。有什么我可以帮助你的吗?')]
"""
```
Inference through the web UI:
```shell
# Experimental environment: 3090
CUDA_VISIBLE_DEVICES=0 swift app-ui \
--ckpt_dir output/qwen1half-7b-chat/vx-xxx/checkpoint-xxx \
--infer_backend vllm --max_model_len 4096 \
--merge_lora true
```
The result is as follows:

### Quantization
Next, we show how to apply **awq-int4 quantization** to the fine-tuned model. The whole quantization process takes about **20 minutes**.
```shell
# Experimental environment: 3090
# 14GB GPU memory
CUDA_VISIBLE_DEVICES=0 swift export \
--ckpt_dir output/qwen1half-7b-chat/vx-xxx/checkpoint-xxx \
--quant_bits 4 --quant_method awq \
--merge_lora true
```
Inference on the quantized model using Python, with VLLM for acceleration:
```python
# Experimental environment: 3090
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
ModelType, get_vllm_engine, get_default_template_type,
get_template, inference_vllm, inference_stream_vllm
)
import torch
model_type = ModelType.qwen1half_7b_chat
model_id_or_path = 'output/qwen1half-7b-chat/vx-xxx/checkpoint-xxx-merged-awq-int4'
llm_engine = get_vllm_engine(model_type,
model_id_or_path=model_id_or_path,
max_model_len=4096)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
# interface similar to `transformers.GenerationConfig`
llm_engine.generation_config.max_new_tokens = 512
request_list = [{'query': '你是谁?'}, {'query': '浙江的省会在哪?'}]
resp_list = inference_vllm(llm_engine, template, request_list)
for request, resp in zip(request_list, resp_list):
print(f"query: {request['query']}")
print(f"response: {resp['response']}")
# streaming
history1 = resp_list[1]['history']
query = '这有什么好吃的'
request_list = [{'query': query, 'history': history1}]
gen = inference_stream_vllm(llm_engine, template, request_list)
print_idx = 0
print(f'query: {query}\nresponse: ', end='')
for resp_list in gen:
request = request_list[0]
resp = resp_list[0]
response = resp['response']
delta = response[print_idx:]
print(delta, end='', flush=True)
print_idx = len(response)
print()
print(f"history: {resp_list[0]['history']}")
"""
query: 你是谁?
response: 我是魔搭的人工智能助手,我的名字叫小黄。我可以回答各种问题,提供信息和帮助。有什么我可以帮助你的吗?
query: 浙江的省会在哪?
response: 浙江省的省会是杭州市。
query: 这有什么好吃的
response: 浙江省的美食非常丰富,其中最著名的有杭州的西湖醋鱼、东坡肉、龙井虾仁等。此外,浙江还有许多其他美食,如宁波的汤圆、绍兴的臭豆腐、嘉兴的粽子等。
history: [('浙江的省会在哪?', '浙江省的省会是杭州市。'), ('这有什么好吃的', '浙江省的美食非常丰富,其中最著名的有杭州的西湖醋鱼、东坡肉、龙井虾仁等。此外,浙江还有许多其他美食,如宁波的汤圆、绍兴的臭豆腐、嘉兴的粽子等。')]
"""
```
### Deployment
Finally, we deploy the quantized model in the **OpenAI API** format:
Start the server:
```shell
# Experimental environment: 3090
CUDA_VISIBLE_DEVICES=0 swift deploy \
--ckpt_dir output/qwen1half-7b-chat/vx-xxx/checkpoint-xxx-merged-awq-int4 \
--infer_backend vllm --max_model_len 4096
```
Call it from the client:
```python
from openai import OpenAI
client = OpenAI(
api_key='EMPTY',
base_url='http://localhost:8000/v1',
)
model_type = client.models.list().data[0].id
print(f'model_type: {model_type}')
messages = []
for query in ['你是谁?', "what's your name?", '你是谁研发的?']:
messages.append({
'role': 'user',
'content': query
})
resp = client.chat.completions.create(
model=model_type,
messages=messages,
seed=42)
response = resp.choices[0].message.content
print(f'query: {query}')
print(f'response: {response}')
messages.append({'role': 'assistant', 'content': response})
# streaming
for query in ['78654+657=?', '晚上睡不着觉怎么办']:
messages.append({'role': 'user', 'content': query})
stream_resp = client.chat.completions.create(
model=model_type,
messages=messages,
stream=True,
seed=42)
print(f'query: {query}')
print('response: ', end='')
response = ''
for chunk in stream_resp:
response += chunk.choices[0].delta.content
print(chunk.choices[0].delta.content, end='', flush=True)
print()
messages.append({'role': 'assistant', 'content': response})
"""
model_type: qwen1half-7b-chat
query: 你是谁?
response: 我是魔搭的人工智能助手,我的名字叫小黄。我可以回答各种问题,提供信息和帮助。有什么我可以帮助你的吗?
query: what's your name?
response: My name is Xiao Huang. I am an AI assistant developed by ModelScope. How can I assist you?
query: 你是谁研发的?
response: 我是由魔搭研发的人工智能助手。
query: 78654+657=?
response: 78654 + 657 = 79311
query: 晚上睡不着觉怎么办
response: 晚上睡不着觉可能是因为压力、焦虑、环境因素等。以下是一些可能有助于改善睡眠质量的建议:
1. 保持规律的作息时间,尽量在每天同一时间上床睡觉和起床。
2. 避免在睡前使用电子设备,因为蓝光可能会干扰你的睡眠。
3. 尝试进行放松和冥想的活动,如深呼吸、瑜伽或冥想。
4. 避免在睡前摄入咖啡因和酒精,因为它们可能会干扰你的睡眠。
5. 如果你经常感到焦虑或压力,可以尝试进行一些放松的活动,如阅读、听音乐或冥想。
6. 如果以上方法都无法改善你的睡眠质量,建议你寻求医生的帮助,因为可能存在其他健康问题。
希望这些建议对你有所帮助。
"""
```
## Qwen1.5-72B-Chat
### Inference
Unlike the 7B demo above, here we run inference via the **CLI**:
```shell
# Experimental environment: 4 * A100
RAY_memory_monitor_refresh_ms=0 CUDA_VISIBLE_DEVICES=0,1,2,3 swift infer \
--model_type qwen1half-72b-chat \
--infer_backend vllm --tensor_parallel_size 4
```
Output:
```python
"""
<<< 你是谁?
我是来自阿里云的大规模语言模型,我叫通义千问。
--------------------------------------------------
<<< 浙江的省会在哪?
浙江的省会是杭州。
--------------------------------------------------
<<< 这有什么好玩的?
杭州有许多著名的旅游景点,如西湖、灵隐寺、宋城、西溪湿地等。西湖的美景四季皆宜,可以欣赏到苏堤春晓、雷峰夕照等著名景观。灵隐寺是中国著名的佛教寺庙,有着深厚的历史文化底蕴。宋城则是一个以宋代文化为主题的公园,可以体验到古代中国的风情。西溪湿地则是一个自然保护区,适合散步、骑行和观鸟。此外,杭州的美食也值得一试,比如龙井虾仁、西湖醋鱼和杭州酱鸭等。
"""
```
### Self-Cognition Fine-tuning
Here we fine-tune with deepspeed-**zero3**, which takes about **30 minutes**:
```shell
# Experimental environment: 4 * A100
# 4 * 70GB GPU memory
CUDA_VISIBLE_DEVICES=0,1,2,3 \
NPROC_PER_NODE=4 \
swift sft \
--model_type qwen1half-72b-chat \
--dataset alpaca-zh#500 alpaca-en#500 self-cognition#500 \
--logging_steps 5 \
--max_length 4096 \
--learning_rate 1e-4 \
--output_dir output \
--lora_target_modules ALL \
--model_name 小黄 'Xiao Huang' \
--model_author 魔搭 ModelScope \
--deepspeed default-zero3 \
```
### Inference After Fine-tuning
Likewise, we run inference via the CLI:
```shell
# Experimental environment: 4 * A100
RAY_memory_monitor_refresh_ms=0 CUDA_VISIBLE_DEVICES=0,1,2,3 swift infer \
--ckpt_dir output/qwen1half-72b-chat/vx-xxx/checkpoint-xxx \
--infer_backend vllm --tensor_parallel_size 4 \
--merge_lora true
```
Output:
```python
"""
<<< 你是谁?
我是魔搭创建的人工智能语言模型,我的名字叫小黄。我的目的是通过文本输入与用户进行交流,提供信息、回答问题、进行对话和执行任务。如果你有任何问题或需要帮助,请随时告诉我。
--------------------------------------------------
<<< 浙江的省会在哪?
浙江的省会是杭州。
--------------------------------------------------
<<< 这有什么好玩的?
杭州有很多好玩的地方,比如西湖、灵隐寺、宋城、西溪湿地等等。如果你喜欢自然风光,可以去西湖边散步,欣赏美丽的湖景和古建筑。如果你对历史感兴趣,可以去灵隐寺和宋城参观,感受古代文化和历史的韵味。如果你喜欢户外活动,可以去西溪湿地徒步,享受大自然的美景和宁静。
"""
```
### Quantization
Apply awq-int4 quantization to the fine-tuned model. The whole quantization process takes about **2 hours**.
```shell
# Experimental environment: A100
# 30GB GPU memory
CUDA_VISIBLE_DEVICES=0 swift export \
--ckpt_dir output/qwen1half-72b-chat/vx-xxx/checkpoint-xxx \
--quant_bits 4 --quant_method awq \
--merge_lora true
```
### Deployment
After quantization, we can deploy the model on a **single A100**.
Start the server:
```shell
# Experimental environment: A100
CUDA_VISIBLE_DEVICES=0 swift deploy \
--ckpt_dir output/qwen1half-72b-chat/vx-xxx/checkpoint-xxx-merged-awq-int4 \
--infer_backend vllm --max_model_len 8192
```
Call it from the client:
```python
from openai import OpenAI
client = OpenAI(
api_key='EMPTY',
base_url='http://localhost:8000/v1',
)
model_type = client.models.list().data[0].id
print(f'model_type: {model_type}')
messages = []
for query in ['你是谁?', "what's your name?", '你是谁研发的?']:
messages.append({
'role': 'user',
'content': query
})
resp = client.chat.completions.create(
model=model_type,
messages=messages,
seed=42)
response = resp.choices[0].message.content
print(f'query: {query}')
print(f'response: {response}')
messages.append({'role': 'assistant', 'content': response})
# streaming
for query in ['78654+657=?', '晚上睡不着觉怎么办']:
messages.append({'role': 'user', 'content': query})
stream_resp = client.chat.completions.create(
model=model_type,
messages=messages,
stream=True,
seed=42)
print(f'query: {query}')
print('response: ', end='')
response = ''
for chunk in stream_resp:
response += chunk.choices[0].delta.content
print(chunk.choices[0].delta.content, end='', flush=True)
print()
messages.append({'role': 'assistant', 'content': response})
"""
model_type: qwen1half-72b-chat
query: 你是谁?
response: 我是由魔搭开发的人工智能语言模型,可以回答问题、提供信息、进行对话和解决问题。有什么我可以帮助你的吗?
query: what's your name?
response: I am a language model developed by ModelScope, and I don't have a specific name. You can call me Xiao Huang or Xiao Huang. How can I help you?
query: 你是谁研发的?
response: 我是由魔搭研发的人工智能语言模型。
query: 78654+657=?
response: 78654 + 657 = 79311
query: 晚上睡不着觉怎么办
response: 如果你晚上睡不着觉,可以尝试以下方法:
1. 放松身心:在睡觉前做一些放松身心的活动,如冥想、深呼吸、瑜伽等。
2. 避免刺激:避免在睡觉前看电视、玩手机、喝咖啡等刺激性活动。
3. 调整环境:保持室内温度适宜、光线柔和、噪音低等。
4. 定期运动:定期进行适量的运动,有助于身体疲劳,有助于睡眠。
5. 建立规律:建立规律的作息时间,有助于调整身体的生物钟。
如果以上方法无效,建议咨询医生或专业人士。
"""
```
| swift/docs/source/LLM/Qwen1.5全流程最佳实践.md/0 | {
"file_path": "swift/docs/source/LLM/Qwen1.5全流程最佳实践.md",
"repo_id": "swift",
"token_count": 12582
} | 176 |
# InternVL Best Practice
## Table of Contents
- [Environment Setup](#environment-setup)
- [Inference](#inference)
- [Fine-tuning](#fine-tuning)
- [Inference After Fine-tuning](#inference-after-fine-tuning)
## Environment Setup
```shell
git clone https://github.com/modelscope/swift.git
cd swift
pip install -e '.[llm]'
pip install Pillow
```
## Inference
Inference with [internvl-chat-v1.5](https://www.modelscope.cn/models/AI-ModelScope/InternVL-Chat-V1-5/summary) and [internvl-chat-v1.5-int8](https://www.modelscope.cn/models/AI-ModelScope/InternVL-Chat-V1-5-int8/summary)
The tutorial below uses `internvl-chat-v1.5` as an example. You can change `--model_type internvl-chat-v1_5-int8` to select the int8 version of the model, or use `mini-internvl-chat-2b-v1_5` or `mini-internvl-chat-4b-v1_5` to use Mini-InternVL.
**Note**
- To use a local model file, add the argument `--model_id_or_path /path/to/model`
- If your GPU does not support flash attention, use the argument `--use_flash_attn false`. For the int8 model, you also need to specify `--dtype bf16` at inference time, otherwise the output may be garbled
- The max_length in the model's own config is small (2048); you can set `--max_length` to change it
- You can use the argument `--gradient_checkpointing true` to reduce GPU memory usage
- **Training** of the InternVL series models only supports datasets that contain images
```shell
# Experimental environment: A100
# 55GB GPU memory
CUDA_VISIBLE_DEVICES=0 swift infer --model_type internvl-chat-v1_5 --dtype bf16 --max_length 4096
# 2*30GB GPU memory
CUDA_VISIBLE_DEVICES=0,1 swift infer --model_type internvl-chat-v1_5 --dtype bf16 --max_length 4096
```
Output: (supports passing a local path or URL)
```python
"""
<<< 你是谁
Input a media path or URL <<<
我是一个人工智能助手,旨在通过自然语言处理和机器学习技术来帮助用户解决问题和完成任务。
--------------------------------------------------
<<< clear
<<< 描述这张图片
Input a media path or URL <<< http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/cat.png
这张图片是一只小猫咪的特写照片。这只小猫咪有着蓝灰色的眼睛和白色的毛发,上面有灰色和黑色的条纹。它的耳朵是尖的,眼睛睁得大大的,看起来非常可爱和好奇。背景是模糊的,无法分辨具体的环境,但看起来像是在室内,有柔和的光线。
--------------------------------------------------
<<< clear
<<< 图中有几只羊
Input a media path or URL <<< http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/animal.png
图中有四只羊。
--------------------------------------------------
<<< clear
<<< 计算结果是多少?
Input a media path or URL <<< http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/math.png
将两个数相加,得到:
1452 + 45304 = 46766
因此,1452 + 45304 = 46766。
--------------------------------------------------
<<< clear
<<< 根据图片中的内容写首诗
Input a media path or URL <<< http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/poem.png
夜色笼罩水面,
小舟轻摇入画帘。
星辉闪烁如珠串,
月色朦胧似轻烟。
树影婆娑映水面,
静谧宁和心自安。
夜深人静思无限,
唯有舟影伴我眠。
--------------------------------------------------
<<< clear
<<< 对图片进行OCR
Input a media path or URL <<< https://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/ocr.png
图中所有文字:
简介
SWIFT支持250+LLM和35+MLLM(多模态大模型)的训练、推
理、评测和部署。开发者可以直接将我们的框架应用到自己的Research和
生产环境中,实现模型训练评测到应用的完整链路。我们除支持
PEFT提供的轻量训练方案外,也提供了一个完整的Adapters库以支持
最新的训练技术,如NEFTune、LoRA+、LLaMA-PRO等,这个适配
器库可以脱离训练脚本直接使用在自已的自定义流程中。
为了方便不熟悉深度学习的用户使用,我们提供了一个Gradio的web-ui
于控制训练和推理,并提供了配套的深度学习课程和最佳实践供新手入
此外,我们也正在拓展其他模态的能力,目前我们支持了AnimateDiff的全参
数训练和LoRA训练。
SWIFT具有丰富的文档体系,如有使用问题请查看这里:
可以在Huggingface space和ModelScope创空间中体验SWIFT web-
ui功能了。
"""
```
Example images are shown below:
cat:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/cat.png" width="250" style="display: inline-block;">
animal:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/animal.png" width="250" style="display: inline-block;">
math:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/math.png" width="250" style="display: inline-block;">
poem:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/poem.png" width="250" style="display: inline-block;">
ocr:
<img src="https://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/ocr.png" width="250" style="display: inline-block;">
**Single-sample inference**
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
get_model_tokenizer, get_template, inference, ModelType,
get_default_template_type, inference_stream
)
from swift.utils import seed_everything
import torch
model_type = ModelType.internvl_chat_v1_5
template_type = get_default_template_type(model_type)
print(f'template_type: {template_type}')
model, tokenizer = get_model_tokenizer(model_type, torch.bfloat16,
model_kwargs={'device_map': 'auto'})
# for GPUs that do not support flash attention
# model, tokenizer = get_model_tokenizer(model_type, torch.float16,
# model_kwargs={'device_map': 'auto'},
# use_flash_attn = False)
model.generation_config.max_new_tokens = 256
template = get_template(template_type, tokenizer)
seed_everything(42)
images = ['http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/road.png']
query = '距离各城市多远?'
response, history = inference(model, template, query, images=images) # chat with image
print(f'query: {query}')
print(f'response: {response}')
# streaming
query = '距离最远的城市是哪?'
gen = inference_stream(model, template, query, history) # chat without image
print_idx = 0
print(f'query: {query}\nresponse: ', end='')
for response, history in gen:
delta = response[print_idx:]
print(delta, end='', flush=True)
print_idx = len(response)
print()
print(f'history: {history}')
"""
query: 距离各城市多远?
response: 这张图片显示的是一个路标,上面标示了三个目的地及其距离:
- 马踏(Mata):14公里
- 阳江(Yangjiang):62公里
- 广州(Guangzhou):293公里
这些距离是按照路标上的指示来计算的。
query: 距离最远的城市是哪?
response: 根据这张图片,距离最远的城市是广州(Guangzhou),距离为293公里。
history: [['距离各城市多远?', '这张图片显示的是一个路标,上面标示了三个目的地及其距离:\n\n- 马踏(Mata):14公里\n- 阳江(Yangjiang):62公里\n- 广州(Guangzhou):293公里\n\n这些距离是按照路标上的指示来计算的。 '], ['距离最远的城市是哪?', '根据这张图片,距离最远的城市是广州(Guangzhou),距离为293公里。 ']]
"""
```
The example image is shown below:
road:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/road.png" width="250" style="display: inline-block;">
## Fine-tuning
Multimodal large models are usually fine-tuned with **custom datasets**. Here is a demo that can be run directly:
LoRA fine-tuning:
**Note**
- By default, only the qkv of the LLM part is fine-tuned with LoRA. If you want to fine-tune all linear layers, including those of the vision model, you can specify `--lora_target_modules ALL`.
- If your GPU does not support flash attention, use the argument `--use_flash_attn false`
```shell
# Experimental environment: A100
# 80GB GPU memory
CUDA_VISIBLE_DEVICES=0 swift sft \
--model_type internvl-chat-v1_5 \
--dataset coco-en-2-mini \
--max_length 4096
# device_map
# Experimental environment: 2*A100...
# 2*43GB GPU memory
CUDA_VISIBLE_DEVICES=0,1 swift sft \
--model_type internvl-chat-v1_5 \
--dataset coco-en-2-mini \
--max_length 4096
# ddp + deepspeed-zero2
# Experimental environment: 2*A100...
# 2*80GB GPU memory
NPROC_PER_NODE=2 \
CUDA_VISIBLE_DEVICES=0,1 swift sft \
--model_type internvl-chat-v1_5 \
--dataset coco-en-2-mini \
--max_length 4096 \
--deepspeed default-zero2
```
Full-parameter fine-tuning:
```bash
# Experimental environment: 4 * A100
# device map
# 4 * 72 GPU memory
CUDA_VISIBLE_DEVICES=0,1,2,3 swift sft \
--model_type internvl-chat-v1_5 \
--dataset coco-en-2-mini \
--max_length 4096 \
--sft_type full \
```
[Custom datasets](../LLM/自定义与拓展.md#-推荐命令行参数的形式) support json and jsonl formats. Below is an example of a custom dataset:
(Only single-turn dialogue is supported; each conversation must contain exactly one image; local paths or URLs can be passed)
```jsonl
{"query": "55555", "response": "66666", "images": ["image_path"]}
{"query": "eeeee", "response": "fffff", "images": ["image_path"]}
{"query": "EEEEE", "response": "FFFFF", "images": ["image_path"]}
```
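A file in this format can be generated with a short script such as the following minimal sketch; the file name, image paths, and query/response texts are placeholders:
```python
import json

# Each line is one single-turn sample with exactly one image (a local path or URL).
samples = [
    {'query': 'Describe this image.', 'response': 'A kitten with blue eyes.',
     'images': ['/path/to/cat.png']},
    {'query': 'How many sheep are in the picture?', 'response': 'There are four sheep.',
     'images': ['/path/to/animal.png']},
]
with open('custom_dataset.jsonl', 'w', encoding='utf-8') as f:
    for sample in samples:
        f.write(json.dumps(sample, ensure_ascii=False) + '\n')
```
The resulting jsonl file can then be registered as a custom dataset as described in the customization documentation linked above.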
## Inference After Fine-tuning
Direct inference:
```shell
CUDA_VISIBLE_DEVICES=0 swift infer \
--ckpt_dir output/internvl-chat-v1_5/vx-xxx/checkpoint-xxx \
--load_dataset_config true \
--max_length 4096
```
**merge-lora** and inference:
```shell
CUDA_VISIBLE_DEVICES=0 swift export \
--ckpt_dir "output/internvl-chat-v1_5/vx-xxx/checkpoint-xxx" \
--merge_lora true
CUDA_VISIBLE_DEVICES=0 swift infer \
--ckpt_dir "output/internvl-chat-v1_5/vx-xxx/checkpoint-xxx-merged" \
--load_dataset_config true \
--max_length 4096
# device map
CUDA_VISIBLE_DEVICES=0,1 swift infer \
--ckpt_dir "output/internvl-chat-v1_5/vx-xxx/checkpoint-xxx-merged" \
--load_dataset_config true \
--max_length 4096
```
| swift/docs/source/Multi-Modal/internvl最佳实践.md/0 | {
"file_path": "swift/docs/source/Multi-Modal/internvl最佳实践.md",
"repo_id": "swift",
"token_count": 5226
} | 177 |
# LLM Human Alignment Training Documentation
## Table of Contents
- [Environment Preparation](#environment-preparation)
- [Human Alignment Training](#human-alignment-training)
## Environment Preparation
GPU devices: A10, 3090, V100, and A100 are all acceptable. For GPUs with <=24 GB of memory, at least a dual-card setup is required. Human alignment training keeps two models in memory (the trained model plus a reference model used only for inference), so it consumes more GPU memory than ordinary fine-tuning.
```bash
# Install ms-swift
git clone https://github.com/modelscope/swift.git
cd swift
pip install -e '.[llm]'
# Environment alignment (usually not necessary. If you encounter errors, you can run the following code, the repository uses the latest environment for testing)
pip install -r requirements/framework.txt -U
pip install -r requirements/llm.txt -U
```
## Human Alignment Training
The following shell script runs a human alignment training job. First, switch to the runtime directory:
```shell
cd examples/pytorch/llm
```
Run the following command:
```shell
# Experimental environment: 4*A100
# Memory usage: 4 * 20G, dual-card device_map * 2ddp
nproc_per_node=2
CUDA_VISIBLE_DEVICES=0,1,2,3 \
NPROC_PER_NODE=$nproc_per_node \
MASTER_PORT=29500 \
swift rlhf \
--rlhf_type dpo \
--model_type yi-6b-chat \
--ref_model_type yi-6b-chat \
--model_revision master \
--sft_type lora \
--tuner_backend swift \
--dtype AUTO \
--output_dir output \
--dataset hh-rlhf-cn:harmless_base_cn \
--num_train_epochs 3 \
--max_length 1024 \
--max_prompt_length 512 \
--check_dataset_strategy none \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 5e-5 \
--gradient_accumulation_steps $(expr 16 / $nproc_per_node) \
--max_grad_norm 1.0 \
--warmup_ratio 0.03 \
--eval_steps 2000 \
--save_steps 2000 \
--save_total_limit 2 \
--logging_steps 10 \
```
### Shell Script
The sh script can be viewed [here](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/dpo).
```bash
# The following script needs to be executed in this directory
cd examples/pytorch/llm
```
**Tips**:
- We default to setting `--gradient_checkpointing true` during training to **save memory**, which will slightly reduce training speed.
- If you are using older GPUs such as **V100**, you need to set `--dtype AUTO` or `--dtype fp16`, because they do not support bf16.
- If your machine has high-performance graphics cards like A100 and you are using the qwen series models, we recommend installing [**flash-attn**](https://github.com/Dao-AILab/flash-attention), which will speed up training and inference as well as reduce memory usage (A10, 3090, V100, etc. graphics cards do not support training with flash-attn). Models that support flash-attn can be viewed in [LLM Supported Models](Supported-models-datasets.md#models)
- If you need to train offline, please use `--model_id_or_path <model_dir>` and set `--check_model_is_latest false`. For specific parameter meanings, please see [Command Line Arguments](Command-line-parameters.md).
- If you want to push weights to the ModelScope Hub during training, you need to set `--push_to_hub true`.
```bash
# dpo training for mistral-7b max_length=1024, bs=1
# Recommended experimental environment: V100, A10, 3090, 2 cards, 4 cards or 8 cards
bash scripts/dpo/lora_ddp_mp/dpo.sh
bash scripts/dpo/lora_ddp_mp/infer.sh
```
Since DPO training will result in a complete model or adapter weights, the steps for LoRA merging and inference are the same as for fine-tuning, so please refer to the corresponding steps in the [Fine-tuning Documentation](LLM-fine-tuning.md#merge-lora).
| swift/docs/source_en/LLM/DPO.md/0 | {
"file_path": "swift/docs/source_en/LLM/DPO.md",
"repo_id": "swift",
"token_count": 1318
} | 178 |
# CogVLM Best Practice
## Table of Contents
- [Environment Setup](#environment-setup)
- [Inference](#inference)
- [Fine-tuning](#fine-tuning)
- [Inference After Fine-tuning](#inference-after-fine-tuning)
## Environment Setup
```shell
git clone https://github.com/modelscope/swift.git
cd swift
pip install -e '.[llm]'
```
## Inference
Inference with [cogvlm-17b-chat](https://modelscope.cn/models/ZhipuAI/cogvlm-chat/summary):
```shell
# Experimental environment: A100
# 38GB GPU memory
CUDA_VISIBLE_DEVICES=0 swift infer --model_type cogvlm-17b-chat
```
Output: (supports passing local path or URL)
```python
"""
<<< Describe this image.
Input a media path or URL <<< http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/cat.png
This image showcases a close-up of a young kitten. The kitten has a fluffy coat with a mix of white, gray, and brown colors. Its eyes are strikingly blue, and it appears to be gazing directly at the viewer. The background is blurred, emphasizing the kitten as the main subject.
--------------------------------------------------
<<< clear
<<< How many sheep are in the picture?
Input a media path or URL <<< http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/animal.png
There are four sheep in the picture.
--------------------------------------------------
<<< clear
<<< What is the calculation result?
Input a media path or URL <<< http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/math.png
The calculation result is '1452+45304=45456'.
--------------------------------------------------
<<< clear
<<< Write a poem based on the content of the picture.
Input a media path or URL <<< http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/poem.png
In a world where night and day intertwine,
A boat floats gently, reflecting the moon's shine.
Fireflies dance, their glow a mesmerizing trance,
As the boat sails through a tranquil, enchanted expanse.
"""
```
Example images are shown below:
cat:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/cat.png" width="250" style="display: inline-block;">
animal:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/animal.png" width="250" style="display: inline-block;">
math:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/math.png" width="250" style="display: inline-block;">
poem:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/poem.png" width="250" style="display: inline-block;">
**Single-sample inference**
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
get_model_tokenizer, get_template, inference, ModelType,
get_default_template_type, inference_stream
)
from swift.utils import seed_everything
import torch
model_type = ModelType.cogvlm_17b_chat
template_type = get_default_template_type(model_type)
print(f'template_type: {template_type}')
model, tokenizer = get_model_tokenizer(model_type, torch.float16,
model_kwargs={'device_map': 'auto'})
model.generation_config.max_new_tokens = 256
template = get_template(template_type, tokenizer)
seed_everything(42)
images = ['http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/road.png']
query = 'How far is it from each city?'
response, _ = inference(model, template, query, images=images)
print(f'query: {query}')
print(f'response: {response}')
# Streaming
query = 'Which city is the farthest?'
images = images
gen = inference_stream(model, template, query, images=images)
print_idx = 0
print(f'query: {query}\nresponse: ', end='')
for response, _ in gen:
delta = response[print_idx:]
print(delta, end='', flush=True)
print_idx = len(response)
print()
"""
query: How far is it from each city?
response: From Mata, it is 14 km; from Yangjiang, it is 62 km; and from Guangzhou, it is 293 km.
query: Which city is the farthest?
response: Guangzhou is the farthest city with a distance of 293 km.
"""
```
Example image is shown below:
road:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/road.png" width="250" style="display: inline-block;">
## Fine-tuning
Fine-tuning multimodal large models usually uses **custom datasets**. Here is a demo that can be run directly:
(By default, lora fine-tuning is performed on the qkv of the language and vision models. If you want to fine-tune all linears, you can specify `--lora_target_modules ALL`)
```shell
# Experimental environment: A100
# 50GB GPU memory
CUDA_VISIBLE_DEVICES=0 swift sft \
--model_type cogvlm-17b-chat \
--dataset coco-en-2-mini \
```
[Custom datasets](../LLM/Customization.md#-Recommended-Command-line-arguments) support json and jsonl formats. Here is an example of a custom dataset:
(Multi-turn dialogue is supported, but each conversation can include only one image. Local file paths or URLs are supported as input)
```jsonl
{"query": "55555", "response": "66666", "images": ["image_path"]}
{"query": "eeeee", "response": "fffff", "history": [], "images": ["image_path"]}
{"query": "EEEEE", "response": "FFFFF", "history": [["AAAAA", "BBBBB"], ["CCCCC", "DDDDD"]], "images": ["image_path"]}
```
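As a minimal sketch (the file name, image path, and texts are placeholders), a multi-turn sample in this format can be appended to a jsonl file as follows; the `history` field holds the earlier (query, response) turns of the conversation:
```python
import json

sample = {
    'query': 'EEEEE',
    'response': 'FFFFF',
    'history': [['AAAAA', 'BBBBB'], ['CCCCC', 'DDDDD']],  # earlier turns of the conversation
    'images': ['/path/to/image.png'],  # exactly one image per conversation
}
with open('custom_dataset.jsonl', 'a', encoding='utf-8') as f:
    f.write(json.dumps(sample, ensure_ascii=False) + '\n')
```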
## Inference After Fine-tuning
Direct inference:
```shell
CUDA_VISIBLE_DEVICES=0 swift infer \
--ckpt_dir output/cogvlm-17b-chat/vx-xxx/checkpoint-xxx \
--load_dataset_config true \
```
**merge-lora** and inference:
```shell
CUDA_VISIBLE_DEVICES=0 swift export \
--ckpt_dir output/cogvlm-17b-chat/vx-xxx/checkpoint-xxx \
--merge_lora true
CUDA_VISIBLE_DEVICES=0 swift infer \
--ckpt_dir output/cogvlm-17b-chat/vx-xxx/checkpoint-xxx-merged \
--load_dataset_config true
```
| swift/docs/source_en/Multi-Modal/cogvlm-best-practice.md/0 | {
"file_path": "swift/docs/source_en/Multi-Modal/cogvlm-best-practice.md",
"repo_id": "swift",
"token_count": 1911
} | 179 |
# Experimental environment: 3090,A10,V100...
# 20GB GPU memory
CUDA_VISIBLE_DEVICES=0 \
swift sft \
--model_type atom-7b-chat \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--dtype AUTO \
--output_dir output \
--ddp_backend nccl \
--dataset ms-bench \
--num_train_epochs 3 \
--max_length 2048 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules DEFAULT \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
| swift/examples/pytorch/llm/scripts/atom_7b_chat/lora/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/atom_7b_chat/lora/sft.sh",
"repo_id": "swift",
"token_count": 363
} | 180 |
# Experimental environment: V100, A10, 3090
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/cogagent-18b-chat/vx-xxx/checkpoint-xx" \
--load_args_from_ckpt_dir true \
--eval_human true \
--max_new_tokens 2048 \
--temperature 0.3 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false \
| swift/examples/pytorch/llm/scripts/cogagent_18b_chat/lora/infer.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/cogagent_18b_chat/lora/infer.sh",
"repo_id": "swift",
"token_count": 162
} | 181 |
# Experiment env: A10, RTX3090/4090, A100
# 1 * 7.5GB GPU memory
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_sft.py \
--model_type llama2-7b-aqlm-2bit-1x16 \
--dataset dureader-robust-zh \
--batch_size 4 \
--max_length 1024 \
--gradient_accumulation_steps 2 \
--learning_rate 5e-5 \
--use_flash_attn true \
--eval_steps 1000 \
--save_steps 1000 \
--train_dataset_sample -1 \
--num_train_epochs 2 \
--check_dataset_strategy none \
--gradient_checkpointing true \
--weight_decay 0.1 \
--max_grad_norm 1.0 \
--warmup_ratio 0.03 \
--save_total_limit 2 \
--logging_steps 10 \
--sft_type lora \
--lora_target_modules ALL \
--lora_rank 8 \
--lora_alpha 32
| swift/examples/pytorch/llm/scripts/llama2_7b_aqlm_2bit_1x16/lora/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/llama2_7b_aqlm_2bit_1x16/lora/sft.sh",
"repo_id": "swift",
"token_count": 305
} | 182 |
# Experimental environment: A100
# 50GB GPU memory
CUDA_VISIBLE_DEVICES=0 \
swift sft \
--model_type qwen1half-72b-chat-int4 \
--sft_type lora \
--output_dir output \
--dataset codefuse-python-en \
--train_dataset_sample -1 \
--num_train_epochs 3 \
--max_length 2048 \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--use_flash_attn true \
| swift/examples/pytorch/llm/scripts/qwen1half_72b_chat_int4/qlora/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/qwen1half_72b_chat_int4/qlora/sft.sh",
"repo_id": "swift",
"token_count": 327
} | 183 |
# Experimental environment: A100
# 17GB GPU memory
CUDA_VISIBLE_DEVICES=0 \
swift sft \
--model_type qwen1half-moe-a2_7b-chat-int4 \
--sft_type lora \
--output_dir output \
--dataset blossom-math-zh \
--train_dataset_sample -1 \
--num_train_epochs 3 \
--max_length 2048 \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--use_flash_attn true \
| swift/examples/pytorch/llm/scripts/qwen1half_moe_a2_7b_chat_int4/qlora/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/qwen1half_moe_a2_7b_chat_int4/qlora/sft.sh",
"repo_id": "swift",
"token_count": 332
} | 184 |
# Experimental environment: A100
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/sus-34b-chat/vx-xxx/checkpoint-xxx" \
| swift/examples/pytorch/llm/scripts/sus_34b_chat/lora/infer.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/sus_34b_chat/lora/infer.sh",
"repo_id": "swift",
"token_count": 55
} | 185 |
# Experimental environment: A100
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/xverse-13b-256k/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.7 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false \
| swift/examples/pytorch/llm/scripts/xverse_13b_256k/infer.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/xverse_13b_256k/infer.sh",
"repo_id": "swift",
"token_count": 143
} | 186 |
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python infer_controlnet_sdxl.py \
--base_model_path "AI-ModelScope/stable-diffusion-xl-base-1.0" \
--controlnet_path "train_controlnet_sdxl" \
--prompt "pale golden rod circle with old lace background" \
--control_image_path "conditioning_image_1.png" \
--image_save_path "output.png" \
--torch_dtype "fp16" \
--seed 0 \
| swift/examples/pytorch/sdxl/scripts/run_infer_controlnet_sdxl.sh/0 | {
"file_path": "swift/examples/pytorch/sdxl/scripts/run_infer_controlnet_sdxl.sh",
"repo_id": "swift",
"token_count": 166
} | 187 |
PYTHONPATH=../../../ \
accelerate launch train_text_to_image_sdxl.py \
--pretrained_model_name_or_path="AI-ModelScope/stable-diffusion-xl-base-1.0" \
--pretrained_vae_model_name_or_path="AI-ModelScope/sdxl-vae-fp16-fix" \
--dataset_name="AI-ModelScope/pokemon-blip-captions" \
--resolution=512 \
--center_crop \
--random_flip \
--proportion_empty_prompts=0.2 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--max_train_steps=10000 \
--use_8bit_adam \
--learning_rate=1e-06 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--mixed_precision="fp16" \
--report_to="tensorboard" \
--validation_prompt="a cute Sundar Pichai creature" \
--validation_epochs 5 \
--checkpointing_steps=5000 \
--output_dir="train_text_to_image_sdxl" \
| swift/examples/pytorch/sdxl/scripts/run_train_text_to_image_sdxl.sh/0 | {
"file_path": "swift/examples/pytorch/sdxl/scripts/run_train_text_to_image_sdxl.sh",
"repo_id": "swift",
"token_count": 337
} | 188 |
Metadata-Version: 2.1
Name: ms-swift
Version: 2.2.0.dev0
Summary: Swift: Scalable lightWeight Infrastructure for Fine-Tuning
Home-page: https://github.com/modelscope/swift
Author: DAMO ModelScope teams
Author-email: contact@modelscope.cn
License: Apache License 2.0
Keywords: python,petl,efficient tuners
Classifier: Development Status :: 4 - Beta
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: accelerate
Requires-Dist: aiohttp
Requires-Dist: binpacking
Requires-Dist: dacite
Requires-Dist: jieba
Requires-Dist: matplotlib
Requires-Dist: modelscope>=1.14
Requires-Dist: nltk
Requires-Dist: numpy<2.0
Requires-Dist: optimum>=1.17.0
Requires-Dist: pandas
Requires-Dist: peft<0.12.0,>=0.11.0
Requires-Dist: requests
Requires-Dist: rouge
Requires-Dist: safetensors
Requires-Dist: tensorboard
Requires-Dist: tqdm
Requires-Dist: transformers<4.42,>=4.33
Requires-Dist: transformers_stream_generator
Requires-Dist: trl>=0.9.4
Provides-Extra: llm
Requires-Dist: charset_normalizer; extra == "llm"
Requires-Dist: cpm_kernels; extra == "llm"
Requires-Dist: fastapi; extra == "llm"
Requires-Dist: gradio>=3.40.0; extra == "llm"
Requires-Dist: sentencepiece; extra == "llm"
Requires-Dist: tiktoken; extra == "llm"
Requires-Dist: uvicorn; extra == "llm"
Provides-Extra: aigc
Requires-Dist: decord; extra == "aigc"
Requires-Dist: diffusers==0.25.0; extra == "aigc"
Requires-Dist: einops; extra == "aigc"
Requires-Dist: torchvision; extra == "aigc"
Provides-Extra: eval
Requires-Dist: llmuses>=0.3.1; extra == "eval"
Provides-Extra: seq-parallel
Requires-Dist: xtuner; extra == "seq-parallel"
Requires-Dist: charset_normalizer; extra == "seq-parallel"
Requires-Dist: cpm_kernels; extra == "seq-parallel"
Requires-Dist: fastapi; extra == "seq-parallel"
Requires-Dist: gradio>=3.40.0; extra == "seq-parallel"
Requires-Dist: sentencepiece; extra == "seq-parallel"
Requires-Dist: tiktoken; extra == "seq-parallel"
Requires-Dist: uvicorn; extra == "seq-parallel"
Provides-Extra: all
Requires-Dist: accelerate; extra == "all"
Requires-Dist: aiohttp; extra == "all"
Requires-Dist: binpacking; extra == "all"
Requires-Dist: dacite; extra == "all"
Requires-Dist: jieba; extra == "all"
Requires-Dist: matplotlib; extra == "all"
Requires-Dist: modelscope>=1.14; extra == "all"
Requires-Dist: nltk; extra == "all"
Requires-Dist: numpy<2.0; extra == "all"
Requires-Dist: optimum>=1.17.0; extra == "all"
Requires-Dist: pandas; extra == "all"
Requires-Dist: peft<0.12.0,>=0.11.0; extra == "all"
Requires-Dist: requests; extra == "all"
Requires-Dist: rouge; extra == "all"
Requires-Dist: safetensors; extra == "all"
Requires-Dist: tensorboard; extra == "all"
Requires-Dist: tqdm; extra == "all"
Requires-Dist: transformers<4.42,>=4.33; extra == "all"
Requires-Dist: transformers_stream_generator; extra == "all"
Requires-Dist: trl>=0.9.4; extra == "all"
Requires-Dist: charset_normalizer; extra == "all"
Requires-Dist: cpm_kernels; extra == "all"
Requires-Dist: fastapi; extra == "all"
Requires-Dist: gradio>=3.40.0; extra == "all"
Requires-Dist: sentencepiece; extra == "all"
Requires-Dist: tiktoken; extra == "all"
Requires-Dist: uvicorn; extra == "all"
Requires-Dist: decord; extra == "all"
Requires-Dist: diffusers==0.25.0; extra == "all"
Requires-Dist: einops; extra == "all"
Requires-Dist: torchvision; extra == "all"
Requires-Dist: llmuses>=0.3.1; extra == "all"
Requires-Dist: xtuner; extra == "all"
# SWIFT (Scalable lightWeight Infrastructure for Fine-Tuning)
<p align="center">
<br>
<img src="resources/banner.png"/>
<br>
</p>
<p align="center">
<a href="https://modelscope.cn/home">ModelScope Community Website</a>
<br>
<a href="README_CN.md">中文</a>   |   English  
</p>
<p align="center">
<img src="https://img.shields.io/badge/python-%E2%89%A53.8-5be.svg">
<img src="https://img.shields.io/badge/pytorch-%E2%89%A51.12%20%7C%20%E2%89%A52.0-orange.svg">
<a href="https://github.com/modelscope/modelscope/"><img src="https://img.shields.io/badge/modelscope-%E2%89%A51.9.5-5D91D4.svg"></a>
<a href="https://pypi.org/project/ms-swift/"><img src="https://badge.fury.io/py/ms-swift.svg"></a>
<a href="https://github.com/modelscope/swift/blob/main/LICENSE"><img src="https://img.shields.io/github/license/modelscope/swift"></a>
<a href="https://pepy.tech/project/ms-swift"><img src="https://pepy.tech/badge/ms-swift"></a>
<a href="https://github.com/modelscope/swift/pulls"><img src="https://img.shields.io/badge/PR-welcome-55EB99.svg"></a>
</p>
<p align="center">
<a href="https://trendshift.io/repositories/6427" target="_blank"><img src="https://trendshift.io/api/badge/repositories/6427" alt="modelscope%2Fswift | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>
## 📖 Table of Contents
- [Introduction](#-introduction)
- [News](#-news)
- [Installation](#%EF%B8%8F-installation)
- [Getting Started](#-getting-started)
- [Documentation](#-documentation)
- [License](#-License)
- [Citation](#-citation)
- [WeChat Group](#-Wechat-Group)
## 📝 Introduction
SWIFT supports training, inference, evaluation and deployment of **250+ LLMs and 35+ MLLMs** (multimodal large models). Developers can directly apply our framework to their own research and production environments to realize the complete workflow from model training and evaluation to application. In addition to supporting the lightweight training solutions provided by [PEFT](https://github.com/huggingface/peft), we also provide a complete **Adapters library** to support the latest training techniques such as NEFTune, LoRA+, LLaMA-PRO, etc. This adapter library can be used directly in your own custom workflow without our training scripts.
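As a minimal, hedged sketch of what using the adapter library directly can look like, the snippet below attaches the NEFTune tuner (the `Swift.prepare_model(model, NEFTuneConfig())` call referenced in the News section below) to a plain Transformers model; the placeholder model id and the exact import path of `NEFTuneConfig` are illustrative assumptions, not a prescribed workflow.
```python
# Hedged sketch: wrap an ordinary Transformers model with a SWIFT tuner and
# train it with your own loop or trainer.
# Assumptions: `Swift` and `NEFTuneConfig` are importable from the top-level
# `swift` package; the model id below is only a placeholder.
from transformers import AutoModelForCausalLM
from swift import Swift, NEFTuneConfig

model = AutoModelForCausalLM.from_pretrained('<your-base-model>')  # placeholder id
model = Swift.prepare_model(model, NEFTuneConfig())
# `model` now has the NEFTune tuner attached and can be passed to any custom
# training loop or to the HF Trainer.
```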
To facilitate use by users unfamiliar with deep learning, we provide a Gradio web-ui for controlling training and inference, as well as accompanying deep learning courses and best practices for beginners.
Additionally, we are expanding capabilities for other modalities. Currently, we support full-parameter training and LoRA training for AnimateDiff.
SWIFT provides rich documentation for users; please check [here](https://github.com/modelscope/swift/tree/main/docs/source_en/LLM/index.md).
SWIFT web-ui is available both on [Huggingface space](https://huggingface.co/spaces/tastelikefeet/swift) and [ModelScope studio](https://www.modelscope.cn/studios/iic/Scalable-lightWeight-Infrastructure-for-Fine-Tuning/summary), please feel free to try!
## 🎉 News
- 🔥2024.06.18: Supports the **DeepSeek-Coder-v2** series models! Use model_type `deepseek-coder-v2-instruct` and `deepseek-coder-v2-lite-instruct` to begin.
- 🔥2024.06.16: Supports **KTO** and **CPO** training! See [document](https://github.com/modelscope/swift/blob/main/docs/source_en/LLM/Human-Preference-Alignment-Training-Documentation.md) to start training!
- 2024.06.11: Support for tool-calling agent deployment that conforms to the OpenAI interface. You can refer to [Agent deployment best practice](https://github.com/modelscope/swift/blob/main/docs/source_en/LLM/Agent-deployment-best-practice.md).
- 🔥2024.06.07: Support **Qwen2** series LLM, including Base and Instruct models of 0.5B, 1.5B, 7B, and 72B, as well as corresponding quantized versions gptq-int4, gptq-int8, and awq-int4. The best practice for self-cognition fine-tuning, inference and deployment of Qwen2-72B-Instruct using dual-card 80GiB A100 can be found [here](https://github.com/modelscope/swift/issues/1092).
- 🔥2024.06.05: Support for **glm4** series LLM and glm4v-9b-chat MLLM. You can refer to [glm4v best practice](docs/source_en/Multi-Modal/glm4v-best-practice.md).
- 🔥2024.06.01: Supports **SimPO** training! See [document](https://github.com/modelscope/swift/blob/main/docs/source_en/LLM/SimPO.md) to start training!
- 🔥2024.06.01: Support for deploying large multimodal models, please refer to the [Multimodal Deployment Documentation](docs/source_en/Multi-Modal/mutlimodal-deployment.md) for more information.
- 2024.05.31: Supports the Mini-InternVL models. Use model_type `mini-internvl-chat-2b-v1_5` and `mini-internvl-chat-4b-v1_5` to train.
- 2024.05.24: Supports the Phi3-Vision model. Use model_type `phi3-vision-128k-instruct` to train.
- 2024.05.22: Supports the DeepSeek-V2-Lite series models; the model_type values are `deepseek-v2-lite` and `deepseek-v2-lite-chat`.
- 2024.05.22: Supports the TeleChat-12B-v2 model and its quantized version; the model_type values are `telechat-12b-v2` and `telechat-12b-v2-gptq-int4`.
- 🔥2024.05.21: Inference and fine-tuning support for MiniCPM-Llama3-V-2_5 are now available. For more details, please refer to [minicpm-v-2.5 Best Practice](docs/source/Multi-Modal/minicpm-v-2.5最佳实践.md).
- 🔥2024.05.20: Support for inference and fine-tuning of cogvlm2-llama3-chinese-chat-19B and cogvlm2-llama3-chat-19B. You can refer to [cogvlm2 Best Practice](docs/source_en/Multi-Modal/cogvlm2-best-practice.md).
- 🔥2024.05.17: Support peft=0.11.0, along with 3 new tuners: `BOFT`, `Vera` and `Pissa`. Use `--sft_type boft/vera` to use BOFT or Vera, and use `--init_lora_weights pissa` with `--sft_type lora` to use Pissa.
- 2024.05.16: Supports Llava-Next (Stronger) series models. For best practice, you can refer to [here](https://github.com/modelscope/swift/tree/main/docs/source_en/Multi-Modal/llava-best-practice.md).
- 🔥2024.05.13: Support Yi-1.5 series models. Use `--model_type yi-1_5-9b-chat` to begin!
- 2024.05.11: Support for qlora training and quantized inference using [hqq](https://github.com/mobiusml/hqq) and [eetq](https://github.com/NetEase-FuXi/EETQ). For more information, see the [LLM Quantization Documentation](https://github.com/modelscope/swift/tree/main/docs/source_en/LLM/LLM-quantization.md).
- 2024.05.10: Support splitting a sequence across multiple GPUs to reduce memory usage. Enable this feature with `pip install .[seq_parallel]`, then add `--sequence_parallel_size n` to your DDP script to begin!
- 2024.05.08: Support the DeepSeek-V2-Chat model; you can refer to [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/deepseek-v2-chat/lora_ddp_ds3/sft.sh). Support the InternVL-Chat-V1.5-Int8 model; for best practice, you can refer to [here](https://github.com/modelscope/swift/tree/main/docs/source_en/Multi-Modal/internvl-best-practice.md).
- 🔥2024.05.07: Supports **ORPO** training! See [document](https://github.com/modelscope/swift/blob/main/docs/source_en/LLM/ORPO.md) to start training!
- 2024.05.07: Supports the Llava-Llama3 model from xtuner; its model_type is `llava-llama-3-8b-v1_1`.
- 2024.04.29: Supports inference and fine-tuning of InternVL-Chat-V1.5 model. For best practice, you can refer to [here](https://github.com/modelscope/swift/tree/main/docs/source_en/Multi-Modal/internvl-best-practice.md).
- 🔥2024.04.26: Support **LISA** and **unsloth** training! Specify `--lisa_activated_layers=2` to use LISA (reducing the memory cost to about 30 percent), or specify `--tuner_backend unsloth` to use unsloth to train a huge model (full or LoRA) with about 30 percent or less of the memory and up to 5x faster speed!
- 🔥2024.04.26: Support the fine-tuning and inference of Qwen1.5-110B and Qwen1.5-110B-Chat model, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/qwen1half_110b_chat/lora_ddp_ds/sft.sh) to start training!
<details><summary>More</summary>
- 2024.04.24: Support for inference and fine-tuning of Phi3 series models, including [phi3-4b-4k-instruct](examples/pytorch/llm/scripts/phi3_4b_4k_instruct/lora) and phi3-4b-128k-instruct.
- 2024.04.22: Support for inference, fine-tuning, and deployment of **chinese-llama-alpaca-2** series models. This includes: chinese-llama-2-1.3b, chinese-llama-2-7b, chinese-llama-2-13b, chinese-alpaca-2-1.3b, chinese-alpaca-2-7b and chinese-alpaca-2-13b along with their corresponding 16k and 64k long text versions.
- 2024.04.22: Support for inference and fine-tuning of Llama3 GPTQ-Int4, GPTQ-Int8, and AWQ series models. Support for inference and fine-tuning of chatglm3-6b-128k, Openbuddy-Llama3.
- 2024.04.20: Support for inference, fine-tuning, and deployment of **Atom** series models. This includes: Atom-7B and Atom-7B-Chat. Use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/atom_7b_chat/lora/sft.sh) to train.
- 2024.04.19: Support for single-card, DDP, ZeRO2, and ZeRO3 training and inference with NPU, please refer to [NPU Inference and Fine-tuning Best Practice](docs/source_en/LLM/NPU-best-practice.md).
- 2024.04.19: Support for inference, fine-tuning, and deployment of **Llama3** series models. This includes: Llama-3-8B, Llama-3-8B-Instruct, Llama-3-70B, and Llama-3-70B-Instruct. Use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/llama3_8b_instruct/lora/sft.sh) to train.
- 2024.04.18: Supported models: wizardlm2-7b-awq, wizardlm2-8x22b, yi-6b-chat-awq, yi-6b-chat-int8, yi-34b-chat-awq, yi-34b-chat-int8. Supported `--deepspeed zero3-offload` and provided default zero3-offload configuration file for zero3+cpu offload usage.
- 2024.04.18: Supported compatibility with HuggingFace ecosystem using the environment variable `USE_HF`, switching to use models and datasets from HF. Please refer to the [HuggingFace ecosystem compatibility documentation](https://github.com/modelscope/swift/tree/main/docs/source_en/LLM/Compat-HF.md).
- 2024.04.17: Support the evaluation for OpenAI standard interfaces. Check the [parameter documentation](docs/source_en/LLM/Command-line-parameters.md#eval-parameters) for details.
- 🔥2024.04.17: Support the **CodeQwen1.5-7B** series: CodeQwen1.5-7B, CodeQwen1.5-7B-Chat, CodeQwen1.5-7B-Chat-AWQ. Use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/codeqwen1half_7b_chat/lora/sft.sh) to train.
- 2024.04.16: Supports inference and fine-tuning of llava-v1.6-34b model. For best practice, you can refer to [here](https://github.com/modelscope/swift/tree/main/docs/source_en/Multi-Modal/llava-best-practice.md).
- 2024.04.13: Support the fine-tuning and inference of Mixtral-8x22B-v0.1 model, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/mixtral_moe_8x22b_v1/lora_ddp_ds/sft.sh) to start training!
- 2024.04.13: Support the newly launched **MiniCPM** series: MiniCPM-V-2.0, MiniCPM-2B-128k, MiniCPM-MoE-8x2B and MiniCPM-1B. Use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/minicpm_moe_8x2b/lora_ddp/sft.sh) to start training!
- 🔥2024.04.11: Support Model Evaluation with MMLU/ARC/CEval datasets (also user custom eval datasets) with one command! Check [this documentation](docs/source_en/LLM/LLM-eval.md) for details. Meanwhile, we support a convenient way to run multiple ablation experiments; check [this documentation](docs/source_en/LLM/LLM-exp.md) to use.
- 🔥2024.04.11: Support **c4ai-command-r** series: c4ai-command-r-plus, c4ai-command-r-v01, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/c4ai_command_r_plus/lora_mp/sft.sh) to train.
- 2024.04.10: Use SWIFT to fine-tune the qwen-7b-chat model to enhance its function call capabilities, and combine it with [Modelscope-Agent](https://github.com/modelscope/modelscope-agent) for best practices, which can be found [here](https://github.com/modelscope/swift/tree/main/docs/source_en/LLM/Agent-best-practice.md#Usage-with-Modelscope_Agent).
- 🔥2024.04.09: Support ruozhiba dataset. Search `ruozhiba` in [this documentation](docs/source_en/LLM/Supported-models-datasets.md) to begin training!
- 2024.04.08: Support the fine-tuning and inference of XVERSE-MoE-A4.2B model, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/xverse_moe_a4_2b/lora/sft.sh) to start training!
- 2024.04.04: Support **QLoRA+FSDP** to train a 70B model with two 24G memory GPUs, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_fsdp/sft.sh) to train.
- 🔥2024.04.03: Support the **Qwen1.5-32B** series: Qwen1.5-32B, Qwen1.5-32B-Chat, Qwen1.5-32B-Chat-GPTQ-Int4. Use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/qwen1half_32b_chat/lora_mp/sft.sh) to start training!
- 🔥2024.04.02: Support the fine-tuning and inference of Mengzi3-13B-Base model, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/mengzi3_13b_base/lora_ddp_ds/sft.sh) to start training!
- 🔥2024.04.01: Support **dbrx** series: dbrx-base and dbrx-instruct, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/dbrx-instruct/lora_mp/sft.sh) to start training!
- 🔥2024.03.29: Support **Qwen1.5-MoE** series: Qwen1.5-MoE-A2.7B, Qwen1.5-MoE-A2.7B-Chat, Qwen1.5-MoE-A2.7B-Chat-GPTQ-Int4.
- 🔥2024.03.29: Support the fine-tuning and inference of **Grok-1** 300B MoE, please view details [here](https://github.com/modelscope/swift/tree/main/docs/source_en/LLM/Grok-1-best-practice.md).
- 🔥2024.03.25: Supports inference and fine-tuning of TeleChat-7b and TeleChat-12b model, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/telechat_12b/lora/sft.sh) to start training!
- 🔥2024.03.20: Supports inference and fine-tuning for the **llava** series. For best practice, you can refer to [here](https://github.com/modelscope/swift/tree/main/docs/source_en/Multi-Modal/llava-best-practice.md).
- 🔥2024.03.12: Support inference and fine-tuning for **deepseek-vl** series. Best practices can be found [here](docs/source_en/Multi-Modal/deepseek-vl-best-practice.md).
- 🔥2024.03.11: Support [GaLore](https://arxiv.org/abs/2403.03507) for effectively reducing memory usage to 1/2 of the original in full-parameter training.
- 🔥2024.03.10: [End-to-end best practices](docs/source_en/LLM/Qwen1.5-best-practice.md) from fine-tuning to deployment for Qwen1.5-7B-Chat and Qwen1.5-72B-Chat.
- 🔥2024.03.09: Support training and inference of MAMBA model, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/mamba-1.4b/lora/sft.sh) to start training!
- 2024.03.09: Support training and inference of AQLM quantized model, use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/llama2_7b_aqlm_2bit_1x16/lora/sft.sh) to start training!
- 2024.03.06: Support training and inference of AWQ quantized model, use [this Qwen1.5-AWQ model script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/qwen1half_7b_chat_awq/lora/sft.sh) to start training, and support training and inference of [yi-9b](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/yi_9b/lora_zero3).
- 🔥2024.02.29: Support [LLaMA PRO](https://arxiv.org/pdf/2401.02415.pdf), simply use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/yi_6b_chat/llamapro/sft.sh) to start training.
- 🔥2024.02.29: Support [LoRA+](https://arxiv.org/pdf/2402.12354.pdf), simply use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/yi_6b_chat/lorap/sft.sh) to start training.
- 2024.02.25: Support `swift export` to quantize models using **AWQ/GPTQ** and push to ModelScope Hub. See documentation: [LLM Quantization](docs/source_en/LLM/LLM-quantization.md).
- 2024.02.22: Support gemma series: gemma-2b, [gemma-2b-instruct](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/gemma_2b_instruct), gemma-7b, gemma-7b-instruct.
- 2024.02.16: Support deepseek-math series: deepseek-math-7b, deepseek-math-7b-instruct, deepseek-math-7b-chat.
- 🔥2024.02.05: Support **Qwen1.5** series models, see [model list](https://github.com/modelscope/swift/blob/main/docs/source/LLM/%E6%94%AF%E6%8C%81%E7%9A%84%E6%A8%A1%E5%9E%8B%E5%92%8C%E6%95%B0%E6%8D%AE%E9%9B%86.md#%E6%A8%A1%E5%9E%8B) for all supported Qwen1.5 models. Provide fine-tuning scripts for [qwen1half-7b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen1half_7b_chat), [qwen1half-7b-chat-int8](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen1half_7b_chat_int8).
- 2024.02.05: Support training of diffusion models such as **SDXL**, **SD**, **ControlNet**, as well as **DreamBooth** training. See corresponding [training scripts](https://github.com/modelscope/swift/tree/main/examples/pytorch/sdxl/scripts) for details.
- 2024.02.01: Support minicpm series: [minicpm-2b-sft-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/minicpm_2b_sft_chat), minicpm-2b-chat.
- 🔥2024.02.01: Support dataset mixing to reduce **catastrophic forgetting**. Use `--train_dataset_mix_ratio 2.0` to enable training! We also open sourced the general knowledge dataset [ms-bench](https://www.modelscope.cn/datasets/iic/ms_bench/summary).
- 🔥2024.02.01: Support Agent training! Agent training algorithm is derived from this [paper](https://arxiv.org/pdf/2309.00986.pdf). We also added [ms-agent](https://www.modelscope.cn/datasets/iic/ms_agent/summary), a high-quality agent dataset. Use [this script](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/scripts/qwen_7b_chat/lora/sft.sh) to start Agent training!
- 🔥2024.02.01: Support adding SFT loss in DPO training to reduce repetitive generation caused by KL divergence loss.
- 2024.02.01: Support using AdaLoRA and IA3 adapters in training.
- 2024.02.01: Support `--merge_lora` parameter in AnimateDiff training.
- 2024.01.30: Support [internlm-xcomposer2-7b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/internlm_xcomposer2_7b_chat).
- 🔥2024.01.30: Support [ZeRO-3](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_14b_chat/full_ddp_zero3/), simply specify `--deepspeed default-zero3`.
- 2024.01.29: Support internlm2-math series: internlm2-math-7b, internlm2-math-7b-chat, internlm2-math-20b, internlm2-math-20b-chat.
- 🔥2024.01.26: Support [yi-vl-6b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/yi_vl_6b_chat), yi-vl-34b-chat.
- 2024.01.24: Support codefuse-codegeex2-6b-chat, codefuse-qwen-14b-chat.
- 2024.01.23: Support orion series: orion-14b, [orion-14b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/orion_14b_chat).
- 2024.01.20: Support [xverse-13b-256k](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/xverse_13b_256k), xverse-65b-v2, xverse-65b-chat.
- 🔥2024.01.17: Support internlm2 series: internlm2-7b-base, internlm2-7b, [internlm2-7b-sft-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/internlm2_7b_sft_chat), internlm2-7b-chat, internlm2-20b-base, internlm2-20b, internlm2-20b-sft-chat, internlm2-20b-chat.
- 2024.01.15: Support yuan series: yuan2-2b-instruct, [yuan2-2b-janus-instruct](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/yuan2_2b_janus_instruct), yuan2-51b-instruct, yuan2-102b-instruct.
- 🔥2024.01.12: Support **deepseek-moe** series: deepseek-moe-16b, [deepseek-moe-16b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/deepseek_moe_16b_chat).
- 🔥2024.01.04: Support **VLLM deployment**, compatible with **OpenAI API** style, see [VLLM Inference Acceleration and Deployment](https://github.com/modelscope/swift/blob/main/docs/source_en/LLM/VLLM-inference-acceleration-and-deployment.md#Deployment) for details.
- 2024.01.04: Update [Benchmark](https://github.com/modelscope/swift/blob/main/docs/source/LLM/Benchmark.md) for convenient viewing of training speed and memory usage of different models.
- 🔥2023.12.29: Support web-ui for sft training and inference, use `swift web-ui` after installing ms-swift to start.
- 🔥2023.12.29: Support DPO RLHF (Reinforcement Learning from Human Feedback) and three datasets for this task: AI-ModelScope/stack-exchange-paired, AI-ModelScope/hh-rlhf and AI-ModelScope/hh_rlhf_cn. See [documentation](https://github.com/modelscope/swift/blob/main/docs/source_en/LLM/DPO.md) to start training!
- 🔥2023.12.28: Support SCEdit! This tuner can significantly reduce memory usage in U-Net and support low-memory controllable image generation (replacing ControlNet), read the section below to learn more.
- 2023.12.23: Support [codegeex2-6b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/codegeex2_6b).
- 2023.12.19: Support [phi2-3b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/phi2_3b).
- 2023.12.18: Support VLLM for inference acceleration.
- 2023.12.15: Support deepseek, deepseek-coder series: deepseek-7b, deepseek-7b-chat, deepseek-67b, deepseek-67b-chat, openbuddy-deepseek-67b-chat, deepseek-coder-1_3b, deepseek-coder-1_3b-instruct, deepseek-coder-6_7b, deepseek-coder-6_7b-instruct, deepseek-coder-33b, deepseek-coder-33b-instruct.
- 2023.12.13: Support mistral-7b-instruct-v2, [mixtral-moe-7b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/mixtral_7b_moe), [mixtral-moe-7b-instruct](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/mixtral_7b_moe_instruct).
- 2023.12.09: Support `freeze_parameters` parameter as a compromise between lora and full-parameter training. Corresponding sh can be found in [full_freeze_ddp](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_7b_chat/full_freeze_ddp). Support `disable_tqdm`, `lazy_tokenize`, `preprocess_num_proc` parameters, see [command line arguments](https://github.com/modelscope/swift/blob/main/docs/source/LLM/%E5%91%BD%E4%BB%A4%E8%A1%8C%E5%8F%82%E6%95%B0.md) for details.
- 2023.12.08: Support [sus-34b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/sus_34b_chat), support yi-6b-200k, yi-34b-200k.
- 2023.12.07: Support [Multi-Node DDP training](https://github.com/modelscope/swift/blob/main/docs/source/LLM/LLM%E5%BE%AE%E8%B0%83%E6%96%87%E6%A1%A3.md#%E4%BD%BF%E7%94%A8cli).
- 2023.12.05: Support models: zephyr-7b-beta-chat, openbuddy-zephyr-7b-chat. Support datasets: hc3-zh, hc3-en.
- 🔥2023.12.02: [Self-cognition fine-tuning best practices](docs/source_en/LLM/Self-cognition-best-practice.md), **10 minutes to fine-tune a large model for self-cognition**, create your own unique large model.
- 🔥2023.11.30: Support training and inference of **qwen-1_8b**, **qwen-72b**, **qwen-audio** series models. Corresponding sh scripts can be found in [qwen_1_8b_chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_1_8b_chat), [qwen_72b_chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_72b_chat), [qwen_audio_chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_audio_chat)
- 🔥2023.11.29: Support training and inference of **AnimateDiff**
- 🔥2023.11.24: Support **yi-34b-chat**, **codefuse-codellama-34b-chat** models. Corresponding sh scripts can be found in [yi_34b_chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/yi_34b_chat), [codefuse_codellama_34b_chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/codefuse_codellama_34b_chat).
- 🔥2023.11.18: Support **tongyi-finance-14b** series models: tongyi-finance-14b, tongyi-finance-14b-chat, tongyi-finance-14b-chat-int4. Corresponding sh scripts can be found in [tongyi_finance_14b_chat_int4](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/tongyi_finance_14b_chat_int4).
- 2023.11.16: Support **flash attn** for more models: qwen series, qwen-vl series, llama series, openbuddy series, mistral series, yi series, ziya series. Please use `use_flash_attn` parameter.
- 🔥2023.11.11: Support **NEFTune**, simply use `Swift.prepare_model(model, NEFTuneConfig())` to enable.
- 🔥2023.11.11: Support training and inference by **command line** and inference by **Web-UI**, see `Usage with Swift CLI` section below for details.
- 🔥2023.11.10: Support **bluelm** series models: bluelm-7b, bluelm-7b-chat, bluelm-7b-32k, bluelm-7b-chat-32k. Corresponding sh scripts can be found in [bluelm_7b_chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/bluelm_7b_chat).
- 🔥2023.11.08: Support training and inference of **xverse-65b** model, script at [xverse_65b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/xverse_65b).
- 🔥2023.11.07: Support training and inference of **yi-6b**, **yi-34b** models, scripts at [yi_6b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/yi_6b), [yi_34b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/yi_34b).
- 🔥2023.10.30: Support two new tuners: **QA-LoRA** and **LongLoRA**.
- 🔥2023.10.30: Support editing models using **ROME** (Rank One Model Editing) to infuse new knowledge into models without training!
- 2023.10.30: Support **skywork-13b** series models: skywork-13b, skywork-13b-chat. Corresponding sh scripts can be found in [skywork_13b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/skywork_13b).
- 🔥2023.10.27: Support **chatglm3** series models: chatglm3-6b-base, chatglm3-6b, chatglm3-6b-32k. Corresponding sh scripts can be found in [chatglm3_6b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/chatglm3_6b).
- 🔥2023.10.17: Support SFT of **int4**, **int8** models: qwen-7b-chat-int4, qwen-14b-chat-int4, qwen-vl-chat-int4, baichuan2-7b-chat-int4, baichuan2-13b-chat-int4, qwen-7b-chat-int8, qwen-14b-chat-int8.
- 2023.10.15: Support **ziya2-13b** series models: ziya2-13b, ziya2-13b-chat.
- 2023.10.12: Support **mistral-7b** series models: openbuddy-mistral-7b-chat, mistral-7b, mistral-7b-instruct.
- 🔥2023.10.07: Support **DeepSpeed ZeRO-2**, enabling lora (not just qlora) to run DDP on dual A10 cards.
- 2023.10.04: Support more math, law, SQL, code domain datasets: blossom-math-zh, school-math-zh, text2sql-en, sql-create-context-en, lawyer-llama-zh, tigerbot-law-zh, leetcode-python-en.
- 🔥2023.09.25: Support **qwen-14b** series: qwen-14b, qwen-14b-chat.
- 2023.09.18: Support **internlm-20b** series: internlm-20b, internlm-20b-chat.
- 2023.09.12: Support **MP+DDP** to accelerate full-parameter training.
- 2023.09.05: Support **openbuddy-llama2-70b-chat**.
- 2023.09.03: Support **baichuan2** series: baichuan2-7b, baichuan2-7b-chat, baichuan2-13b, baichuan2-13b-chat.
</details>
## 🛠️ Installation
SWIFT runs in the Python environment. Please ensure your Python version is higher than 3.8.
- Method 1: Install SWIFT using pip command:
```shell
# Full capabilities
pip install 'ms-swift[all]' -U
# LLM only
pip install 'ms-swift[llm]' -U
# AIGC only
pip install 'ms-swift[aigc]' -U
# Adapters only
pip install ms-swift -U
```
- Method 2: Install SWIFT through source code (convenient for running training and inference scripts), please run the following commands:
```shell
git clone https://github.com/modelscope/swift.git
cd swift
pip install -e '.[llm]'
```
SWIFT depends on torch>=1.13; torch>=2.0.0 is recommended.
- Method 3: Use SWIFT in our Docker image
```shell
# China-Hangzhou image
docker pull registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu22.04-cuda12.1.0-py310-torch2.1.2-tf2.14.0-1.13.1
# US-west image
docker pull registry.us-west-1.aliyuncs.com/modelscope-repo/modelscope:ubuntu22.04-cuda12.1.0-py310-torch2.1.2-tf2.14.0-1.13.1
```
## 🚀 Getting Started
This section introduces basic usage, see the [Documentation](#-documentation) section for more ways to use.
### Web-UI
Web-UI is a gradio-based interface for **zero-threshold** training and deployment. It is easy to use and perfectly supports multi-GPU training and deployment:
```shell
SWIFT_UI_LANG=en swift web-ui
```

### Training
#### Training Scripts
You can refer to the following scripts to customize your own training script.
- full: [qwen1half-7b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen1half_7b_chat/full) (A100), [qwen-7b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_7b_chat/full_mp) (2\*A100)
- full+ddp+zero2: [qwen-7b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_7b_chat/full_ddp_zero2) (4\*A100)
- full+ddp+zero3: [qwen-14b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_14b_chat/full_ddp_zero3) (4\*A100)
- lora: [chatglm3-6b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/chatglm3_6b/lora) (3090), [baichuan2-13b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/baichuan2_13b_chat/lora_mp) (2\*3090), [yi-34b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/yi_34b_chat/lora) (A100), [qwen-72b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_72b_chat/lora_mp) (2\*A100)
- lora+ddp: [chatglm3-6b](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/chatglm3_6b/lora_ddp) (2\*3090)
- lora+ddp+zero3: [qwen-14b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_14b_chat/lora_ddp_zero3) (4\*3090), [qwen-72b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_72b_chat/lora_ddp_zero3) (4\*A100)
- qlora(gptq-int4): [qwen-7b-chat-int4](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_7b_chat_int4/qlora) (3090)
- qlora(gptq-int8): [qwen1half-7b-chat-int8](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen1half_7b_chat_int8/qlora) (3090)
- qlora(bnb-int4): [qwen-7b-chat](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/qwen_7b_chat/qlora) (3090)
#### Supported Training Processes
| Training Process | Training Method |
|------------------|-------------------------------------------------------------------------------|
| Pretraining | Text Generation |
| Fine-tuning | Single-turn/Multi-turn<br>Agent Training/Self-cognition<br>Multi-modal Vision/Multi-modal Speech|
| Human Alignment | DPO<br>ORPO<br>SimPO |
| Text-to-Image | DreamBooth, etc. |
| Text-to-Video | - |
#### Single GPU Training
Start single GPU fine-tuning with the following command:
LoRA:
```shell
# Experimental Environment: A100
# GPU Memory Requirement: 20GB
# Runtime: 3.1 hours
CUDA_VISIBLE_DEVICES=0 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
--eval_steps 200 \
```
Full-parameter:
```shell
# Experimental Environment: A100
# GPU Memory Requirement: 80GB
# Runtime: 2.5 hours
CUDA_VISIBLE_DEVICES=0 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type full \
--output_dir output \
--eval_steps 500 \
```
#### Model Parallel Training
```shell
# Experimental Environment: 2 * A100
# GPU Memory Requirement: 10GB + 13GB
# Runtime: 3.4 hours
CUDA_VISIBLE_DEVICES=0,1 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
```
#### Data Parallel Training
```shell
# Experimental Environment: 4 * A100
# GPU Memory Requirement: 4 * 30GB
# Runtime: 0.8 hours
NPROC_PER_NODE=4 \
CUDA_VISIBLE_DEVICES=0,1,2,3 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
```
Combining Model Parallelism and Data Parallelism:
```shell
# Experimental Environment: 4 * A100
# GPU Memory Requirement: 2*14GB + 2*18GB
# Runtime: 1.7 hours
NPROC_PER_NODE=2 \
CUDA_VISIBLE_DEVICES=0,1,2,3 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
```
#### Deepspeed Training
Deepspeed supports training of quantized GPTQ and AWQ models.
ZeRO2:
```shell
# Experimental Environment: 4 * A100
# GPU Memory Requirement: 4 * 21GB
# Runtime: 0.9 hours
NPROC_PER_NODE=4 \
CUDA_VISIBLE_DEVICES=0,1,2,3 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
--deepspeed default-zero2 \
```
ZeRO3:
```shell
# Experimental Environment: 4 * A100
# GPU Memory Requirement: 4 * 19GB
# Runtime: 3.2 hours
NPROC_PER_NODE=4 \
CUDA_VISIBLE_DEVICES=0,1,2,3 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
--deepspeed default-zero3 \
```
ZeRO3-Offload:
```shell
# Experimental Environment: 4 * A100
# GPU Memory Requirement: 4 * 12GB
# Runtime: 60 hours
NPROC_PER_NODE=4 \
CUDA_VISIBLE_DEVICES=0,1,2,3 \
swift sft \
--model_id_or_path AI-ModelScope/WizardLM-2-8x22B \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
--deepspeed zero3-offload \
```
#### Multi-node Multi-GPU
```shell
# node0
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
NNODES=2 \
NODE_RANK=0 \
MASTER_ADDR=127.0.0.1 \
NPROC_PER_NODE=8 \
swift sft \
--model_id_or_path qwen1half-32b-chat \
--sft_type full \
--dataset blossom-math-zh \
--output_dir output \
--deepspeed default-zero3 \
# node1
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
NNODES=2 \
NODE_RANK=1 \
MASTER_ADDR=xxx.xxx.xxx.xxx \
NPROC_PER_NODE=8 \
swift sft \
--model_id_or_path qwen1half-32b-chat \
--sft_type full \
--dataset blossom-math-zh \
--output_dir output \
--deepspeed default-zero3 \
```
##### AliYun-DLC multi-node training
In the DLC product, WORLD_SIZE is the number of nodes and RANK is the node index; note that this differs from the torchrun definitions of these variables.
```shell
NNODES=$WORLD_SIZE \
NODE_RANK=$RANK \
swift sft \
--model_id_or_path qwen1half-32b-chat \
--sft_type full \
--dataset blossom-math-zh \
--output_dir output \
--deepspeed default-zero3
```
### Inference
Original model:
```shell
CUDA_VISIBLE_DEVICES=0 swift infer --model_type qwen1half-7b-chat
# use VLLM
CUDA_VISIBLE_DEVICES=0 swift infer --model_type qwen1half-7b-chat \
--infer_backend vllm --max_model_len 8192
```
LoRA fine-tuned:
```shell
CUDA_VISIBLE_DEVICES=0 swift infer --ckpt_dir xxx/checkpoint-xxx --load_dataset_config true
# use VLLM
CUDA_VISIBLE_DEVICES=0 swift infer \
--ckpt_dir xxx/checkpoint-xxx --load_dataset_config true \
--merge_lora true --infer_backend vllm --max_model_len 8192
```
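Besides the CLI, inference can also be driven from Python through the `swift.llm` helpers. The sketch below mirrors the vLLM test script included later in this dump (`scripts/tests/test_vllm.py`); the model_type value is only an example and any supported model_type can be substituted.
```python
# Minimal programmatic-inference sketch using the swift.llm vLLM helpers,
# following the pattern of scripts/tests/test_vllm.py in this repo.
# Assumption: ms-swift[llm] and vllm are installed; model_type is an example.
from swift.llm import (get_default_template_type, get_template,
                       get_vllm_engine, inference_vllm)

model_type = 'qwen1half-7b-chat'  # example; any supported model_type works
llm_engine = get_vllm_engine(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
llm_engine.generation_config.max_new_tokens = 256

request_list = [{'query': 'Hello! Who are you?'}]
resp_list = inference_vllm(llm_engine, template, request_list)
print(resp_list[0]['response'])
```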
### Evaluation
Original model:
```shell
# We recommend using vLLM for acceleration (arc evaluated in half a minute)
CUDA_VISIBLE_DEVICES=0 swift eval --model_type qwen1half-7b-chat \
--eval_dataset ceval mmlu arc gsm8k --infer_backend vllm
```
LoRA fine-tuned:
```shell
CUDA_VISIBLE_DEVICES=0 swift eval --ckpt_dir xxx/checkpoint-xxx \
--eval_dataset ceval mmlu arc gsm8k --infer_backend vllm \
--merge_lora true \
```
### Quantization
Original model:
```shell
CUDA_VISIBLE_DEVICES=0 swift export --model_type qwen1half-7b-chat \
--quant_bits 4 --quant_method awq
```
LoRA fine-tuned:
```shell
CUDA_VISIBLE_DEVICES=0 swift export \
--ckpt_dir xxx/checkpoint-xxx --load_dataset_config true \
--quant_method awq --quant_bits 4 \
--merge_lora true \
```
### Deployment
The client uses the OpenAI API for invocation; for details, refer to the [LLM deployment documentation](https://github.com/modelscope/swift/blob/main/docs/source_en/LLM/VLLM-inference-acceleration-and-deployment.md). A minimal client sketch is shown after the server commands below.
Original model:
```shell
CUDA_VISIBLE_DEVICES=0 swift deploy --model_type qwen1half-7b-chat
# use VLLM for acceleration
CUDA_VISIBLE_DEVICES=0 swift deploy --model_type qwen1half-7b-chat \
--infer_backend vllm --max_model_len 8192
```
LoRA fine-tuned:
```shell
CUDA_VISIBLE_DEVICES=0 swift deploy --ckpt_dir xxx/checkpoint-xxx
# use VLLM for acceleration
CUDA_VISIBLE_DEVICES=0 swift deploy \
--ckpt_dir xxx/checkpoint-xxx --merge_lora true \
--infer_backend vllm --max_model_len 8192
```
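On the client side, any OpenAI-compatible SDK can talk to the deployed service. The sketch below uses the `openai` Python package; the base_url port and the model name are assumptions and must match your actual `swift deploy` configuration.
```python
# Hedged client sketch for a service started with `swift deploy`.
# Assumptions: the server exposes an OpenAI-compatible endpoint at
# http://127.0.0.1:8000/v1 and the model name equals the deployed model_type;
# adjust both to your deployment.
from openai import OpenAI

client = OpenAI(api_key='EMPTY', base_url='http://127.0.0.1:8000/v1')
resp = client.chat.completions.create(
    model='qwen1half-7b-chat',  # assumed to match the deployed model_type
    messages=[{'role': 'user', 'content': 'Hello!'}],
    max_tokens=256,
    temperature=0.3,
)
print(resp.choices[0].message.content)
```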
### Supported Models
The complete list of supported models and datasets can be found at [Supported Models and Datasets List](docs/source_en/LLM/Supported-models-datasets.md).
#### LLMs
| Model Type | Model Introduction | Language | Model Size | Model Type |
|------------------------------------------------|------------------------------------------------------------------------|--------------------|----------------------------------------|------------------------------------------- |
| Qwen<br>Qwen1.5<br>Qwen2 | [Tongyi Qwen 1.0 and 1.5 series models](https://github.com/QwenLM) | Chinese<br>English | 0.5B-110B<br>including quantized versions | base model<br>chat model<br>MoE model<br>code model |
| ChatGLM2<br>ChatGLM3<br>Codegeex2<br>GLM4 | [Zhipu ChatGLM series models](https://github.com/THUDM) | Chinese<br>English | 6B-9B | base model<br>chat model<br>code model<br>long text model |
| Baichuan/Baichuan2 | [Baichuan 1 and Baichuan 2](https://github.com/baichuan-inc) | Chinese<br>English | 7B-13B<br>including quantized versions | base model<br>chat model |
| Yuan2 | [Langchao Yuan series models](https://github.com/IEIT-Yuan) | Chinese<br>English | 2B-102B | instruct model |
| XVerse | [XVerse series models](https://github.com/xverse-ai) | Chinese<br>English | 7B-65B | base model<br>chat model<br>long text model<br>MoE model |
| LLaMA2 | [LLaMA2 series models](https://github.com/facebookresearch/llama) | English | 7B-70B<br>including quantized versions | base model<br>chat model |
| LLaMA3 | [LLaMA3 series models](https://github.com/meta-llama/llama3) | English | 8B-70B<br>including quantized versions | base model<br>chat model |
| Mistral<br>Mixtral | [Mistral series models](https://github.com/mistralai/mistral-src) | English | 7B-22B | base model<br>instruct model<br>MoE model |
| Yi<br>Yi1.5 | [01AI's YI series models](https://github.com/01-ai) | Chinese<br>English | 6B-34B<br>including quantized | base model<br>chat model<br>long text model |
| InternLM<br>InternLM2<br>InternLM2-Math | [Pujiang AI Lab InternLM series models](https://github.com/InternLM/InternLM) | Chinese<br>English | 1.8B-20B | base model<br>chat model<br>math model |
| DeepSeek<br>DeepSeek-MoE<br>DeepSeek-Coder<br>DeepSeek-Math<br>DeepSeek-V2<br>DeepSeek-Coder-V2 | [DeepSeek series models](https://github.com/deepseek-ai) | Chinese<br>English | 1.3B-236B | base model<br>chat model<br>MoE model<br>code model<br>math model |
| MAMBA | [MAMBA temporal convolution model](https://github.com/state-spaces/mamba) | English | 130M-2.8B | base model |
| Gemma | [Google Gemma series models](https://github.com/google/gemma_pytorch) | English | 2B-7B | base model<br>instruct model |
| MiniCPM | [OpenBmB MiniCPM series models](https://github.com/OpenBMB/MiniCPM) | Chinese<br>English | 2B-3B | chat model<br>MoE model |
| OpenBuddy | [OpenBuddy series models](https://github.com/OpenBuddy/OpenBuddy) | Chinese<br>English | 7B-70B | base model<br>chat model |
| Orion | [OrionStar AI series models](https://github.com/OrionStarAI) | Chinese<br>English | 14B | base model<br>chat model |
| BlueLM | [VIVO BlueLM large model](https://github.com/vivo-ai-lab/BlueLM) | Chinese<br>English | 7B | base model<br>chat model |
| Ziya2 | [Fengshenbang series models](https://github.com/IDEA-CCNL/Fengshenbang-LM) | Chinese<br>English | 13B | base model<br>chat model |
| Skywork | [Skywork series models](https://github.com/SkyworkAI/Skywork) | Chinese<br>English | 13B | base model<br>chat model |
| Zephyr | Zephyr series models based on Mistral | English | 7B | chat model |
| PolyLM | [Tongyi Lab self-developed PolyLM series models](https://github.com/DAMO-NLP-MT/PolyLM) | Multilingual | 13B | base model |
| SeqGPT | [Tongyi Lab self-developed text understanding model for information extraction and text classification](https://github.com/Alibaba-NLP/SeqGPT) | Chinese | 560M | semantic understanding model |
| SUS | [Southern University of Science and Technology model fine-tuned on YI](https://github.com/SUSTech-IDEA/SUS-Chat) | Chinese<br>English | 34B | chat model |
| Tongyi-Finance | [Tongyi finance series models](https://github.com/QwenLM/Qwen) | Chinese<br>English | 14B | base model<br>chat model<br>financial model |
| CodeFuse-CodeLLaMA<br>CodeFuse-Codegeex2<br>CodeFuse-Qwen | [Ant CodeFuse series models](https://github.com/codefuse-ai) | Chinese<br>English | 6B-34B | chat model<br>code model |
| phi2/phi3 | Microsoft's PHI series models | English | 3B/4B | base model<br>instruct model<br>code model |
| Grok | [X-ai](https://github.com/xai-org/grok-1) | English | 300B | base model |
| TeleChat | [Tele-AI](https://github.com/Tele-AI/Telechat) | Chinese<br>English | 7B-12B | chat model |
| dbrx | [databricks](https://github.com/databricks/dbrx) | English | 132B | base model<br>chat model |
| mengzi3 | [Langboat](https://github.com/Langboat/Mengzi3) | Chinese<br>English | 13B | base model |
| c4ai-command-r | [c4ai](https://cohere.com/command) | Multilingual | 35B-104B | chat model |
| WizardLM2 | [WizardLM2 series models](https://github.com/nlpxucan/WizardLM) | English | 7B-8x22B<br>including quantized versions | chat model<br>MoE model |
| Atom | [Atom](https://github.com/LlamaFamily/Llama-Chinese) | Chinese | 7B| base model<br>chat model|
| Chinese-LLaMA-Alpaca-2 | [Chinese-LLaMA-Alpaca-2](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2) | Chinese | 1.3B-13B| base model<br>chat model<br>long text model |
| Chinese-LLaMA-Alpaca-3 | [Chinese-LLaMA-Alpaca-3](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3) | Chinese | 8B| base model<br>chat model|
| ModelScope-Agent | [ModelScope Agent series models](https://github.com/modelscope/modelscope-agent) | Chinese | 7B-14B| agent model |
#### MLLMs
| Model Type | Model Introduction | Language | Model Size | Model Type |
|------------------|------------------------------------------------------------------------|--------------------|-------------------|------------------- |
| Qwen-VL | [Tongyi Qwen vision model](https://github.com/QwenLM) | Chinese<br>English | 7B<br>including quantized versions | base model<br>chat model |
| Qwen-Audio | [Tongyi Qwen speech model](https://github.com/QwenLM) | Chinese<br>English | 7B | base model<br>chat model |
| YI-VL | [01AI's YI series vision models](https://github.com/01-ai) | Chinese<br>English | 6B-34B | chat model |
| XComposer2 | [Pujiang AI Lab InternLM vision model](https://github.com/InternLM/InternLM) | Chinese<br>English | 7B | chat model |
| DeepSeek-VL | [DeepSeek series vision models](https://github.com/deepseek-ai) | Chinese<br>English | 1.3B-7B | chat model |
| MiniCPM-V<br>MiniCPM-V-2<br>MiniCPM-V-2_5 | [OpenBmB MiniCPM vision model](https://github.com/OpenBMB/MiniCPM) | Chinese<br>English | 3B-9B | chat model |
| CogVLM<br>CogVLM2<br>CogAgent<br>GLM4V | [Zhipu ChatGLM visual QA and Agent model](https://github.com/THUDM/) | Chinese<br>English | 9B-19B | chat model |
| Llava1.5<br>Llava1.6 | [Llava series models](https://github.com/haotian-liu/LLaVA) | English | 7B-34B | chat model |
| Llava-Next | [Llava-Next series models](https://github.com/LLaVA-VL/LLaVA-NeXT) | Chinese<br>English | 8B-110B | chat model |
| mPLUG-Owl | [mPLUG-Owl series models](https://github.com/X-PLUG/mPLUG-Owl) | English | 11B | chat model |
| InternVL | [InternVL](https://github.com/OpenGVLab/InternVL) | Chinese<br>English | 2B-25.5B<br>including quantized version | chat model |
| Llava-llama3 | [xtuner](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers) | English | 8B | chat model |
| Phi3-Vision | Microsoft | English | 4B | chat model |
| PaliGemma | Google | English | 3B | chat model |
#### Diffusion Models
| Model Type | Model Introduction | Language | Model Type |
|---------------------|----------------------------------------------------------------------|----------|------------------ |
| AnimateDiff | [AnimateDiff animation model](https://github.com/guoyww/AnimateDiff) | English | text-to-video |
| SD1.5/SD2.0/SDXL | [StabilityAI series diffusion models](https://github.com/Stability-AI) | English | text-to-image |
### Supported Open Source Datasets
| Dataset Type | Training Task | Documentation |
|--------------|:---------------|--------------------------------------------------------------- |
| General | Fine-tuning | 🔥ruozhiba, 🔥ms-bench, 🔥alpaca-en(gpt4), 🔥alpaca-zh(gpt4), multi-alpaca, instinwild, cot-en, cot-zh, firefly-zh, instruct-en, gpt4all-en, sharegpt, tulu-v2-sft-mixture, wikipedia-zh, open-orca, sharegpt-gpt4, deepctrl-sft, coig-cqia. |
| Agent | Fine-tuning | 🔥ms-agent, 🔥ms-agent-for-agentfabric, ms-agent-multirole, 🔥toolbench-for-alpha-umi, damo-agent-zh, damo-agent-zh-mini, agent-instruct-all-en. |
| General | Human Alignment | hh-rlhf, 🔥hh-rlhf-cn, stack-exchange-paired. |
| Code | Fine-tuning | code-alpaca-en, 🔥leetcode-python-en, 🔥codefuse-python-en, 🔥codefuse-evol-instruction-zh. |
| Medical | Fine-tuning | medical-en, medical-zh, 🔥disc-med-sft-zh. |
| Legal | Fine-tuning | lawyer-llama-zh, tigerbot-law-zh, 🔥disc-law-sft-zh. |
| Math | Fine-tuning | 🔥blossom-math-zh, school-math-zh, open-platypus-en. |
| SQL | Fine-tuning | text2sql-en, 🔥sql-create-context-en. |
| Text Generation | Fine-tuning | 🔥advertise-gen-zh, 🔥dureader-robust-zh. |
| Classification | Fine-tuning | cmnli-zh, 🔥jd-sentiment-zh, 🔥hc3-zh, 🔥hc3-en. |
| Quantization Assist | Quantization | pileval. |
| Other | Fine-tuning | finance-en, poetry-zh, webnovel-zh, generated-chat-zh, cls-fudan-news-zh, ner-jave-zh. |
| Vision | Fine-tuning | coco-en, 🔥coco-en-mini, coco-en-2, coco-en-2-mini, capcha-images. |
| Audio | Fine-tuning | aishell1-zh, 🔥aishell1-zh-mini. |
### Supported Technologies
| Technology Name |
| ------------------------------------------------------------ |
| 🔥LoRA: [LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/abs/2106.09685) |
| 🔥LoRA+: [LoRA+: Efficient Low Rank Adaptation of Large Models](https://arxiv.org/pdf/2402.12354.pdf) |
| 🔥GaLore:[GaLore: Memory-Efficient LLM Training by Gradient Low-Rank Projection](https://arxiv.org/abs/2403.03507) |
| 🔥LISA: [LISA: Layerwise Importance Sampling for Memory-Efficient Large Language Model Fine-Tuning](https://arxiv.org/abs/2403.17919) |
| 🔥UnSloth: https://github.com/unslothai/unsloth |
| 🔥LLaMA PRO: [LLAMA PRO: Progressive LLaMA with Block Expansion](https://arxiv.org/pdf/2401.02415.pdf) |
| 🔥SCEdit: [SCEdit: Efficient and Controllable Image Diffusion Generation via Skip Connection Editing](https://arxiv.org/abs/2312.11392) |
| 🔥NEFTune: [Noisy Embeddings Improve Instruction Finetuning](https://arxiv.org/abs/2310.05914) |
| LongLoRA: [Efficient Fine-tuning of Long-Context Large Language Models](https://arxiv.org/abs/2309.12307) |
| Adapter: [Parameter-Efficient Transfer Learning for NLP](http://arxiv.org/abs/1902.00751) |
| Vision Prompt Tuning: [Visual Prompt Tuning](https://arxiv.org/abs/2203.12119) |
| Side: [Side-Tuning: A Baseline for Network Adaptation via Additive Side Networks](https://arxiv.org/abs/1912.13503) |
| Res-Tuning: [Res-Tuning: A Flexible and Efficient Tuning Paradigm via Unbinding Tuner from Backbone](https://arxiv.org/abs/2310.19859) |
| Tuners provided by [PEFT](https://github.com/huggingface/peft), such as IA3, AdaLoRA, etc. |
### Supported Hardware
| Hardware Environment | Notes |
|--------------------------------|-------------------------------------------------|
| CPU | |
| RTX 20/30/40 series, etc. | BF16 and FlashAttn are supported from the 30 series onward |
| Computing cards T4/V100, etc. | BF16 and FlashAttn not supported |
| Computing cards A10/A100, etc. | Support BF16 and FlashAttn |
| Huawei Ascend NPU | |
## 📃 Documentation
### Documentation Compiling
```shell
make docs
# Check docs/build/html/index.html in web-browser
```
### User Guide
| Document Name |
| ------------------------------------------------------------ |
| [Using Web-UI](docs/source_en/GetStarted/Web-ui.md) |
| [Using Tuners](docs/source_en/GetStarted/Tuners.md) |
| [LLM Inference](docs/source_en/LLM/LLM-inference.md) |
| [LLM Fine-tuning](docs/source_en/LLM/LLM-fine-tuning.md) |
| [LLM Evaluation](docs/source_en/LLM/LLM-eval.md) |
| [LLM Quantization](docs/source_en/LLM/LLM-quantization.md) |
| [LLM Deployment](docs/source_en/LLM/VLLM-inference-acceleration-and-deployment.md) |
| [AnimateDiff Training](docs/source_en/AIGC/AnimateDiff-train-infer.md) |
| [Human Preference Alignment Training Documentation](docs/source_en/LLM/Human-Preference-Alignment-Training-Documentation.md) |
### Reference Documentation
| Document Name |
| ------------------------------------------------------------ |
| [Command Line Arguments](docs/source_en/LLM/Command-line-parameters.md) |
| [Supported Models and Datasets List](docs/source_en/LLM/Supported-models-datasets.md) |
| [Customizing New Models and Datasets](docs/source_en/LLM/Customization.md) |
| [Runtime Speed and Memory Benchmark](docs/source_en/LLM/Benchmark.md) |
### Best Practices
| Best Practices Name |
| ------------------------------------------------------------ |
| [Agent Fine-Tuning Best Practice](docs/source_en/LLM/Agent-fine-tuning-best-practice.md) |
| [Agent Deployment Best Practice](docs/source_en/LLM/Agent-deployment-best-practice.md) |
| [Self-Cognition Fine-Tuning Best Practice](docs/source_en/LLM/Self-cognition-best-practice.md) |
| [Qwen1.5 Best Practice](docs/source_en/LLM/Qwen1.5-best-practice.md) |
| [Multi-Modal Model Training Best Practice](docs/source_en/Multi-Modal/index.md) |
| [NPU Best Practice](docs/source_en/LLM/NPU-best-practice.md) |
| [DPO Human Alignment Training](docs/source_en/LLM/DPO.md) |
| [ORPO Human Alignment Training](docs/source_en/LLM/ORPO.md) |
| [SimPO Human Alignment Training](docs/source_en/LLM/SimPO.md) |
### Deep Learning Tutorials
| Tutorial Name |
|-------------------------------------------------------------- |
| [Introduction to Deep Learning](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/A.%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%85%A5%E9%97%A8%E4%BB%8B%E7%BB%8D.md) |
| [Large Model Basics](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/B.%E9%AD%94%E6%90%AD%E7%A4%BE%E5%8C%BA%E5%92%8CLLM%E5%A4%A7%E6%A8%A1%E5%9E%8B%E5%9F%BA%E7%A1%80%E7%9F%A5%E8%AF%86.md) |
| [Prompt Engineering](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/C.%E6%8F%90%E7%A4%BA%E8%AF%8D%E5%B7%A5%E7%A8%8B-prompt%20engineering.md) |
| [Transformer Architecture Introduction](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/D.Transformer%E7%BB%93%E6%9E%84.md) |
| [Training Technique Selection](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/E.%E6%8A%80%E6%9C%AF%E9%80%89%E5%9E%8B.md) |
| [Data Preprocessing](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/F.%E6%95%B0%E6%8D%AE%E9%A2%84%E5%A4%84%E7%90%86.md) |
| [Quantization](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/G.%E9%87%8F%E5%8C%96.md) |
| [Training](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/H.%E8%AE%AD%E7%BB%83.md) |
| [Inference](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/I.LLM%E5%92%8C%E5%A4%9A%E6%A8%A1%E6%80%81%E6%A8%A1%E5%9E%8B%E9%AB%98%E6%95%88%E6%8E%A8%E7%90%86%E5%AE%9E%E8%B7%B5.md) |
| [Deployment](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/J.%E9%83%A8%E7%BD%B2.md) |
| [Evaluation](https://github.com/modelscope/modelscope-classroom/blob/main/LLM-tutorial/K.%E5%A4%A7%E6%A8%A1%E5%9E%8B%E8%87%AA%E5%8A%A8%E8%AF%84%E4%BC%B0%E7%90%86%E8%AE%BA%E5%92%8C%E5%AE%9E%E6%88%98--LLM%20Automatic%20Evaluation.md) |
## 🏛 License
This framework is licensed under the [Apache License (Version 2.0)](https://github.com/modelscope/modelscope/blob/master/LICENSE). For models and datasets, please refer to the original resource page and follow the corresponding License.
## 📎 Citation
```bibtex
@Misc{swift,
    title = {SWIFT: Scalable lightWeight Infrastructure for Fine-Tuning},
author = {The ModelScope Team},
howpublished = {\url{https://github.com/modelscope/swift}},
year = {2024}
}
```
## ☎ WeChat Group
You can contact and communicate with us by joining our WeChat group:
<p align="left">
<img src="asset/wechat.png" width="250" style="display: inline-block;">
</p>
## Star History
[](https://star-history.com/#modelscope/swift&Date)
| swift/ms_swift.egg-info/PKG-INFO/0 | {
"file_path": "swift/ms_swift.egg-info/PKG-INFO",
"repo_id": "swift",
"token_count": 26789
} | 189 |
{
"chat_format": "chatml",
"do_sample": true,
"eos_token_id": 151645,
"max_new_tokens": 2048,
"max_window_size": 24000,
"pad_token_id": 151645,
"temperature": 0.3,
"top_k": 20,
"top_p": 0.7,
"transformers_version": "4.41.2"
}
| swift/output/qwen-7b-chat/v1-20240626-092716/checkpoint-12/generation_config.json/0 | {
"file_path": "swift/output/qwen-7b-chat/v1-20240626-092716/checkpoint-12/generation_config.json",
"repo_id": "swift",
"token_count": 121
} | 190 |
from dataclasses import dataclass
from swift.llm import get_default_template_type, get_template, get_vllm_engine, inference_vllm
from swift.utils import get_main
@dataclass
class VLLMTestArgs:
model_type: str
def test_vllm(args: VLLMTestArgs) -> None:
model_type = args.model_type
llm_engine = get_vllm_engine(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
llm_engine.generation_config.max_new_tokens = 256
request_list = [{'query': '你好!'}, {'query': '浙江的省会在哪?'}]
resp_list = inference_vllm(llm_engine, template, request_list)
for request, resp in zip(request_list, resp_list):
print(f"query: {request['query']}")
print(f"response: {resp['response']}")
test_vllm_main = get_main(VLLMTestArgs, test_vllm)
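# Usage sketch (assumption: `get_main` turns the dataclass fields into CLI flags, matching how other
# ms-swift entry points are built). Under that assumption this script can be run as, e.g.:
#   python test_vllm.py --model_type qwen-7b-chat
# where `qwen-7b-chat` is only an illustrative model_type.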
if __name__ == '__main__':
test_vllm_main()
| swift/scripts/tests/test_vllm.py/utils.py/0 | {
"file_path": "swift/scripts/tests/test_vllm.py/utils.py",
"repo_id": "swift",
"token_count": 377
} | 191 |
#!/usr/bin/env python
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning script for Stable Diffusion XL for text2image."""
import argparse
import functools
import gc
import logging
import math
import os
import random
import shutil
from pathlib import Path
import accelerate
import datasets
import diffusers
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel, compute_snr
from diffusers.utils import is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from modelscope import AutoTokenizer, MsDataset
from packaging import version
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm.auto import tqdm
from transformers import PretrainedConfig
from swift import push_to_hub, snapshot_download
logger = get_logger(__name__)
DATASET_NAME_MAPPING = {
'AI-ModelScope/pokemon-blip-captions': ('text', 'image:FILE'),
}
def save_model_card(
repo_id: str,
images=None,
validation_prompt=None,
base_model=str,
dataset_name=str,
repo_folder=None,
vae_path=None,
):
img_str = ''
for i, image in enumerate(images):
image.save(os.path.join(repo_folder, f'image_{i}.png'))
        img_str += f'![img_{i}](./image_{i}.png)\n'
yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
dataset: {dataset_name}
tags:
- stable-diffusion-xl
- stable-diffusion-xl-diffusers
- text-to-image
- diffusers
inference: true
---
"""
model_card = f"""
# Text-to-image finetuning - {repo_id}
This pipeline was finetuned from **{base_model}** on the **{dataset_name}** dataset. Below are some example images
generated with the finetuned pipeline using the following prompt: {validation_prompt}: \n
{img_str}
Special VAE used for training: {vae_path}.
"""
with open(os.path.join(repo_folder, 'README.md'), 'w') as f:
f.write(yaml + model_card)
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str,
revision: str,
subfolder: str = 'text_encoder'):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path, subfolder=subfolder, revision=revision)
model_class = text_encoder_config.architectures[0]
if model_class == 'CLIPTextModel':
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == 'CLIPTextModelWithProjection':
from transformers import CLIPTextModelWithProjection
return CLIPTextModelWithProjection
else:
raise ValueError(f'{model_class} is not supported.')
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description='Simple example of a training script.')
parser.add_argument(
'--pretrained_model_name_or_path',
type=str,
default=None,
required=True,
help='Path to pretrained model or model identifier from huggingface.co/models or modelscope.cn/models.',
)
parser.add_argument(
'--pretrained_vae_model_name_or_path',
type=str,
default=None,
help='Path to pretrained VAE model with better numerical stability. \
More details: https://github.com/huggingface/diffusers/pull/4038.',
)
parser.add_argument(
'--revision',
type=str,
default=None,
required=False,
help='Revision of pretrained model identifier from huggingface.co/models or modelscope.cn/models.',
)
parser.add_argument(
'--variant',
type=str,
default=None,
help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
)
parser.add_argument(
'--dataset_name',
type=str,
default=None,
help=('The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,'
' dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,'
' or to a folder containing files that 🤗 Datasets can understand.'),
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help="The config of the Dataset, leave as None if there's only one config.",
)
parser.add_argument(
'--train_data_dir',
type=str,
default=None,
help=('A folder containing the training data. Folder contents must follow the structure described in'
' https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file'
' must exist to provide the captions for the images. Ignored if `dataset_name` is specified.'),
)
parser.add_argument(
'--image_column', type=str, default='image:FILE', help='The column of the dataset containing an image.')
parser.add_argument(
'--caption_column',
type=str,
default='text',
help='The column of the dataset containing a caption or a list of captions.',
)
parser.add_argument(
'--validation_prompt',
type=str,
default=None,
help='A prompt that is used during validation to verify that the model is learning.',
)
parser.add_argument(
'--num_validation_images',
type=int,
default=4,
help='Number of images that should be generated during validation with `validation_prompt`.',
)
parser.add_argument(
'--validation_epochs',
type=int,
default=1,
help=('Run fine-tuning validation every X epochs. The validation process consists of running the prompt'
' `args.validation_prompt` multiple times: `args.num_validation_images`.'),
)
parser.add_argument(
'--max_train_samples',
type=int,
default=None,
help=('For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'),
)
parser.add_argument(
'--proportion_empty_prompts',
type=float,
default=0,
help='Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).',
)
parser.add_argument(
'--output_dir',
type=str,
default='sdxl-model-finetuned',
help='The output directory where the model predictions and checkpoints will be written.',
)
parser.add_argument(
'--cache_dir',
type=str,
default=None,
help='The directory where the downloaded models and datasets will be stored.',
)
parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
parser.add_argument(
'--resolution',
type=int,
default=1024,
help=('The resolution for input images, all the images in the train/validation dataset will be resized to this'
' resolution'),
)
parser.add_argument(
'--center_crop',
default=False,
action='store_true',
help=('Whether to center crop the input images to the resolution. If not set, the images will be randomly'
' cropped. The images will be resized to the resolution first before cropping.'),
)
parser.add_argument(
'--random_flip',
action='store_true',
help='whether to randomly flip images horizontally',
)
parser.add_argument(
'--train_batch_size', type=int, default=16, help='Batch size (per device) for the training dataloader.')
parser.add_argument('--num_train_epochs', type=int, default=100)
parser.add_argument(
'--max_train_steps',
type=int,
default=None,
help='Total number of training steps to perform. If provided, overrides num_train_epochs.',
)
parser.add_argument(
'--checkpointing_steps',
type=int,
default=500,
help=('Save a checkpoint of the training state every X updates. These checkpoints can be used both as final'
' checkpoints in case they are better than the last checkpoint, and are also suitable for resuming'
' training using `--resume_from_checkpoint`.'),
)
parser.add_argument(
'--checkpoints_total_limit',
type=int,
default=None,
help=('Max number of checkpoints to store.'),
)
parser.add_argument(
'--resume_from_checkpoint',
type=str,
default=None,
help=('Whether training should be resumed from a previous checkpoint. Use a path saved by'
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'),
)
parser.add_argument(
'--gradient_accumulation_steps',
type=int,
default=1,
help='Number of updates steps to accumulate before performing a backward/update pass.',
)
parser.add_argument(
'--gradient_checkpointing',
action='store_true',
help='Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.',
)
parser.add_argument(
'--learning_rate',
type=float,
default=1e-4,
help='Initial learning rate (after the potential warmup period) to use.',
)
parser.add_argument(
'--scale_lr',
action='store_true',
default=False,
help='Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.',
)
parser.add_argument(
'--lr_scheduler',
type=str,
default='constant',
help=('The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'),
)
parser.add_argument(
'--lr_warmup_steps', type=int, default=500, help='Number of steps for the warmup in the lr scheduler.')
parser.add_argument(
'--timestep_bias_strategy',
type=str,
default='none',
choices=['earlier', 'later', 'range', 'none'],
help=(
'The timestep bias strategy, which may help direct the model toward learning low or high frequency details.'
" Choices: ['earlier', 'later', 'range', 'none']."
" The default is 'none', which means no bias is applied, and training proceeds normally."
" The value of 'later' will increase the frequency of the model's final training timesteps."),
)
parser.add_argument(
'--timestep_bias_multiplier',
type=float,
default=1.0,
help=('The multiplier for the bias. Defaults to 1.0, which means no bias is applied.'
' A value of 2.0 will double the weight of the bias, and a value of 0.5 will halve it.'),
)
parser.add_argument(
'--timestep_bias_begin',
type=int,
default=0,
help=('When using `--timestep_bias_strategy=range`, the beginning (inclusive) timestep to bias.'
' Defaults to zero, which equates to having no specific bias.'),
)
parser.add_argument(
'--timestep_bias_end',
type=int,
default=1000,
help=('When using `--timestep_bias_strategy=range`, the final timestep (inclusive) to bias.'
' Defaults to 1000, which is the number of timesteps that Stable Diffusion is trained on.'),
)
parser.add_argument(
'--timestep_bias_portion',
type=float,
default=0.25,
        help=('The portion of timesteps to bias. Defaults to 0.25, meaning 25% of timesteps will be biased.'
' A value of 0.5 will bias one half of the timesteps. '
'The value provided for `--timestep_bias_strategy` determines'
' whether the biased portions are in the earlier or later timesteps.'),
)
parser.add_argument(
'--snr_gamma',
type=float,
default=None,
help='SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. '
'More details here: https://arxiv.org/abs/2303.09556.',
)
parser.add_argument('--use_ema', action='store_true', help='Whether to use EMA model.')
parser.add_argument(
'--allow_tf32',
action='store_true',
help=('Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see'
' https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices'),
)
parser.add_argument(
'--dataloader_num_workers',
type=int,
default=0,
help=(
'Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process.'
),
)
parser.add_argument(
'--use_8bit_adam', action='store_true', help='Whether or not to use 8-bit Adam from bitsandbytes.')
parser.add_argument('--adam_beta1', type=float, default=0.9, help='The beta1 parameter for the Adam optimizer.')
parser.add_argument('--adam_beta2', type=float, default=0.999, help='The beta2 parameter for the Adam optimizer.')
parser.add_argument('--adam_weight_decay', type=float, default=1e-2, help='Weight decay to use.')
parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon value for the Adam optimizer')
parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
parser.add_argument('--hub_token', type=str, default=None, help='The token to use to push to the Model Hub.')
parser.add_argument(
'--prediction_type',
type=str,
default=None,
help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or \
leave `None`. If left to `None` the default prediction type of the scheduler: \
`noise_scheduler.config.prediciton_type` is chosen.",
)
parser.add_argument(
'--hub_model_id',
type=str,
default=None,
help='The name of the repository to keep in sync with the local `output_dir`.',
)
parser.add_argument(
'--logging_dir',
type=str,
default='logs',
help=('[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to'
' *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***.'),
)
parser.add_argument(
'--report_to',
type=str,
default='tensorboard',
help=('The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'),
)
parser.add_argument(
'--mixed_precision',
type=str,
default=None,
choices=['no', 'fp16', 'bf16'],
help=(
'Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >='
            ' 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the'
' flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config.'),
)
parser.add_argument('--local_rank', type=int, default=-1, help='For distributed training: local_rank')
parser.add_argument(
'--enable_xformers_memory_efficient_attention', action='store_true', help='Whether or not to use xformers.')
parser.add_argument('--noise_offset', type=float, default=0, help='The scale of noise offset.')
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
env_local_rank = int(os.environ.get('LOCAL_RANK', -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
# Sanity checks
if args.dataset_name is None and args.train_data_dir is None:
raise ValueError('Need either a dataset name or a training folder.')
if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1:
raise ValueError('`--proportion_empty_prompts` must be in the range [0, 1].')
args.base_model_id = args.pretrained_model_name_or_path
if not os.path.exists(args.pretrained_model_name_or_path):
args.pretrained_model_name_or_path = snapshot_download(
args.pretrained_model_name_or_path, revision=args.revision)
args.vae_base_model_id = args.pretrained_vae_model_name_or_path
if args.pretrained_vae_model_name_or_path and not os.path.exists(args.pretrained_vae_model_name_or_path):
args.pretrained_vae_model_name_or_path = snapshot_download(args.pretrained_vae_model_name_or_path)
return args
# Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
def encode_prompt(batch, text_encoders, tokenizers, proportion_empty_prompts, caption_column, is_train=True):
prompt_embeds_list = []
prompt_batch = batch[caption_column]
captions = []
for caption in prompt_batch:
if random.random() < proportion_empty_prompts:
captions.append('')
elif isinstance(caption, str):
captions.append(caption)
elif isinstance(caption, (list, np.ndarray)):
# take a random caption if there are multiple
captions.append(random.choice(caption) if is_train else caption[0])
with torch.no_grad():
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
text_inputs = tokenizer(
captions,
padding='max_length',
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors='pt',
)
text_input_ids = text_inputs.input_ids
prompt_embeds = text_encoder(
text_input_ids.to(text_encoder.device),
output_hidden_states=True,
)
            # We are only interested in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
bs_embed, seq_len, _ = prompt_embeds.shape
prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
return {'prompt_embeds': prompt_embeds.cpu(), 'pooled_prompt_embeds': pooled_prompt_embeds.cpu()}
def compute_vae_encodings(batch, vae):
images = batch.pop('pixel_values')
pixel_values = torch.stack(list(images))
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
pixel_values = pixel_values.to(vae.device, dtype=vae.dtype)
with torch.no_grad():
model_input = vae.encode(pixel_values).latent_dist.sample()
model_input = model_input * vae.config.scaling_factor
return {'model_input': model_input.cpu()}
def generate_timestep_weights(args, num_timesteps):
weights = torch.ones(num_timesteps)
# Determine the indices to bias
num_to_bias = int(args.timestep_bias_portion * num_timesteps)
if args.timestep_bias_strategy == 'later':
bias_indices = slice(-num_to_bias, None)
elif args.timestep_bias_strategy == 'earlier':
bias_indices = slice(0, num_to_bias)
elif args.timestep_bias_strategy == 'range':
        # Out of the possible 1000 timesteps, we might want to focus on e.g. 200-500.
range_begin = args.timestep_bias_begin
range_end = args.timestep_bias_end
if range_begin < 0:
raise ValueError(
'When using the range strategy for timestep bias, you must provide a beginning timestep greater \
or equal to zero.')
if range_end > num_timesteps:
raise ValueError(
'When using the range strategy for timestep bias, you must provide an ending timestep smaller than \
the number of timesteps.')
bias_indices = slice(range_begin, range_end)
else: # 'none' or any other string
return weights
if args.timestep_bias_multiplier <= 0:
        raise ValueError(
'The parameter --timestep_bias_multiplier is not intended to be used to disable the training of specific '
'timesteps.'
' If it was intended to disable timestep bias, use `--timestep_bias_strategy none` instead.'
' A timestep bias multiplier less than or equal to 0 is not allowed.')
# Apply the bias
weights[bias_indices] *= args.timestep_bias_multiplier
# Normalize
weights /= weights.sum()
return weights
def main():
args = parse_args()
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_config=accelerator_project_config,
)
if args.report_to == 'wandb':
if not is_wandb_available():
raise ImportError('Make sure to install wandb if you want to use it for logging during training.')
import wandb
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Load the tokenizers
tokenizer_one = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder='tokenizer',
revision=args.revision,
use_fast=False,
)
tokenizer_two = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder='tokenizer_2',
revision=args.revision,
use_fast=False,
)
# import correct text encoder classes
text_encoder_cls_one = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
text_encoder_cls_two = import_model_class_from_model_name_or_path(
args.pretrained_model_name_or_path, args.revision, subfolder='text_encoder_2')
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder='scheduler')
# Check for terminal SNR in combination with SNR Gamma
text_encoder_one = text_encoder_cls_one.from_pretrained(
args.pretrained_model_name_or_path, subfolder='text_encoder', revision=args.revision, variant=args.variant)
text_encoder_two = text_encoder_cls_two.from_pretrained(
args.pretrained_model_name_or_path, subfolder='text_encoder_2', revision=args.revision, variant=args.variant)
vae_path = (
args.pretrained_model_name_or_path
if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path)
vae = AutoencoderKL.from_pretrained(
vae_path,
subfolder='vae' if args.pretrained_vae_model_name_or_path is None else None,
revision=args.revision,
variant=args.variant,
)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder='unet', revision=args.revision, variant=args.variant)
# Freeze vae and text encoders.
vae.requires_grad_(False)
text_encoder_one.requires_grad_(False)
text_encoder_two.requires_grad_(False)
# Set unet as trainable.
unet.train()
    # For mixed precision training we cast all non-trainable weights to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == 'fp16':
weight_dtype = torch.float16
elif accelerator.mixed_precision == 'bf16':
weight_dtype = torch.bfloat16
# Move unet, vae and text_encoder to device and cast to weight_dtype
# The VAE is in float32 to avoid NaN losses.
vae.to(accelerator.device, dtype=torch.float32)
text_encoder_one.to(accelerator.device, dtype=weight_dtype)
text_encoder_two.to(accelerator.device, dtype=weight_dtype)
# Create EMA for the unet.
if args.use_ema:
ema_unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder='unet', revision=args.revision, variant=args.variant)
ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse('0.0.16'):
logger.warn(
'xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training,'
' please update xFormers to at least 0.0.17. '
'See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details.')
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError('xformers is not available. Make sure it is installed correctly')
# `accelerate` 0.16.0 will have better support for customized saving
if version.parse(accelerate.__version__) >= version.parse('0.16.0'):
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
def save_model_hook(models, weights, output_dir):
if accelerator.is_main_process:
if args.use_ema:
ema_unet.save_pretrained(os.path.join(output_dir, 'unet_ema'))
for i, model in enumerate(models):
model.save_pretrained(os.path.join(output_dir, 'unet'))
# make sure to pop weight so that corresponding model is not saved again
weights.pop()
def load_model_hook(models, input_dir):
if args.use_ema:
load_model = EMAModel.from_pretrained(os.path.join(input_dir, 'unet_ema'), UNet2DConditionModel)
ema_unet.load_state_dict(load_model.state_dict())
ema_unet.to(accelerator.device)
del load_model
for i in range(len(models)):
# pop models so that they are not loaded again
model = models.pop()
# load diffusers style into model
load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder='unet')
model.register_to_config(**load_model.config)
model.load_state_dict(load_model.state_dict())
del load_model
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes)
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError('To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.')
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# Optimizer creation
params_to_optimize = unet.parameters()
optimizer = optimizer_class(
params_to_optimize,
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Get the datasets: you can either provide your own training and evaluation files (see below)
# or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
def path_to_img(example):
example['image'] = Image.open(example['image:FILE'])
return example
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
dataset = MsDataset.load(
args.dataset_name,
args.dataset_config_name,
data_dir=args.train_data_dir,
)
if not isinstance(dataset, dict):
dataset = {'train': dataset}
else:
data_files = {}
if args.train_data_dir is not None:
data_files['train'] = os.path.join(args.train_data_dir, '**')
dataset = load_dataset(
'imagefolder',
data_files=data_files,
cache_dir=args.cache_dir,
)
# See more about loading custom images at
# https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
column_names = dataset['train'].column_names
# 6. Get the column names for input/target.
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None)
if args.image_column is None:
image_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
image_column = args.image_column
if image_column not in column_names:
raise ValueError(
f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}")
if args.caption_column is None:
caption_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
caption_column = args.caption_column
if caption_column not in column_names:
raise ValueError(
f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}")
if image_column.endswith(':FILE'):
dataset['train'] = dataset['train'].map(path_to_img)
image_column = 'image'
# Preprocessing the datasets.
train_resize = transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR)
train_crop = transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution)
train_flip = transforms.RandomHorizontalFlip(p=1.0)
train_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
def preprocess_train(examples):
images = [image.convert('RGB') for image in examples[image_column]]
# image aug
original_sizes = []
all_images = []
crop_top_lefts = []
for image in images:
original_sizes.append((image.height, image.width))
image = train_resize(image)
if args.center_crop:
y1 = max(0, int(round((image.height - args.resolution) / 2.0)))
x1 = max(0, int(round((image.width - args.resolution) / 2.0)))
image = train_crop(image)
else:
y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution))
image = crop(image, y1, x1, h, w)
if args.random_flip and random.random() < 0.5:
# flip
x1 = image.width - x1
image = train_flip(image)
crop_top_left = (y1, x1)
crop_top_lefts.append(crop_top_left)
image = train_transforms(image)
all_images.append(image)
examples['original_sizes'] = original_sizes
examples['crop_top_lefts'] = crop_top_lefts
examples['pixel_values'] = all_images
return examples
with accelerator.main_process_first():
if args.max_train_samples is not None:
dataset['train'] = dataset['train'].shuffle(seed=args.seed).select(range(args.max_train_samples))
# Set the training transforms
train_dataset = dataset['train'].with_transform(preprocess_train)
# Let's first compute all the embeddings so that we can free up the text encoders
# from memory. We will pre-compute the VAE encodings too.
text_encoders = [text_encoder_one, text_encoder_two]
tokenizers = [tokenizer_one, tokenizer_two]
compute_embeddings_fn = functools.partial(
encode_prompt,
text_encoders=text_encoders,
tokenizers=tokenizers,
proportion_empty_prompts=args.proportion_empty_prompts,
caption_column=args.caption_column,
)
compute_vae_encodings_fn = functools.partial(compute_vae_encodings, vae=vae)
with accelerator.main_process_first():
from datasets.fingerprint import Hasher
# fingerprint used by the cache for the other processes to load the result
# details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401
new_fingerprint = Hasher.hash(args)
new_fingerprint_for_vae = Hasher.hash('vae')
train_dataset = train_dataset.map(compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint)
train_dataset = train_dataset.map(
compute_vae_encodings_fn,
batched=True,
batch_size=args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps,
new_fingerprint=new_fingerprint_for_vae,
)
del text_encoders, tokenizers, vae
gc.collect()
torch.cuda.empty_cache()
def collate_fn(examples):
model_input = torch.stack([torch.tensor(example['model_input']) for example in examples])
original_sizes = [example['original_sizes'] for example in examples]
crop_top_lefts = [example['crop_top_lefts'] for example in examples]
prompt_embeds = torch.stack([torch.tensor(example['prompt_embeds']) for example in examples])
pooled_prompt_embeds = torch.stack([torch.tensor(example['pooled_prompt_embeds']) for example in examples])
return {
'model_input': model_input,
'prompt_embeds': prompt_embeds,
'pooled_prompt_embeds': pooled_prompt_embeds,
'original_sizes': original_sizes,
'crop_top_lefts': crop_top_lefts,
}
# DataLoaders creation:
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
shuffle=True,
collate_fn=collate_fn,
batch_size=args.train_batch_size,
num_workers=args.dataloader_num_workers,
)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
)
# Prepare everything with our `accelerator`.
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader,
lr_scheduler)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers('text2image-fine-tune-sdxl', config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num Epochs = {args.num_train_epochs}')
logger.info(f' Instantaneous batch size per device = {args.train_batch_size}')
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
logger.info(f' Total optimization steps = {args.max_train_steps}')
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != 'latest':
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith('checkpoint')]
dirs = sorted(dirs, key=lambda x: int(x.split('-')[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.")
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f'Resuming from checkpoint {path}')
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split('-')[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc='Steps',
# Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process,
)
for epoch in range(first_epoch, args.num_train_epochs):
train_loss = 0.0
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(unet):
# Sample noise that we'll add to the latents
model_input = batch['model_input'].to(accelerator.device)
noise = torch.randn_like(model_input)
if args.noise_offset:
# https://www.crosslabs.org//blog/diffusion-with-offset-noise
noise += args.noise_offset * torch.randn(
(model_input.shape[0], model_input.shape[1], 1, 1), device=model_input.device)
bsz = model_input.shape[0]
if args.timestep_bias_strategy == 'none':
# Sample a random timestep for each image without bias.
timesteps = torch.randint(
0, noise_scheduler.config.num_train_timesteps, (bsz, ), device=model_input.device)
else:
# Sample a random timestep for each image, potentially biased by the timestep weights.
# Biasing the timestep weights allows us to spend less time training irrelevant timesteps.
weights = generate_timestep_weights(args, noise_scheduler.config.num_train_timesteps).to(
model_input.device)
timesteps = torch.multinomial(weights, bsz, replacement=True).long()
# Add noise to the model input according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
# time ids
def compute_time_ids(original_size, crops_coords_top_left):
# Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
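                    # Added note: these "time ids" are SDXL's micro-conditioning inputs. The tuple built
                    # below is (original_height, original_width, crop_top, crop_left, target_height,
                    # target_width); it is embedded via the UNet's added conditions so the model is
                    # conditioned on the original image size and the crop coordinates recorded during
                    # preprocessing.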
target_size = (args.resolution, args.resolution)
add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_time_ids = torch.tensor([add_time_ids])
add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
return add_time_ids
add_time_ids = torch.cat(
[compute_time_ids(s, c) for s, c in zip(batch['original_sizes'], batch['crop_top_lefts'])])
# Predict the noise residual
unet_added_conditions = {'time_ids': add_time_ids}
prompt_embeds = batch['prompt_embeds'].to(accelerator.device)
pooled_prompt_embeds = batch['pooled_prompt_embeds'].to(accelerator.device)
unet_added_conditions.update({'text_embeds': pooled_prompt_embeds})
model_pred = unet(
noisy_model_input, timesteps, prompt_embeds, added_cond_kwargs=unet_added_conditions).sample
# Get the target for loss depending on the prediction type
if args.prediction_type is not None:
# set prediction_type of scheduler if defined
noise_scheduler.register_to_config(prediction_type=args.prediction_type)
if noise_scheduler.config.prediction_type == 'epsilon':
target = noise
elif noise_scheduler.config.prediction_type == 'v_prediction':
target = noise_scheduler.get_velocity(model_input, noise, timesteps)
elif noise_scheduler.config.prediction_type == 'sample':
# We set the target to latents here, but the model_pred will return the noise sample prediction.
target = model_input
# We will have to subtract the noise residual from the prediction to get the target sample.
model_pred = model_pred - noise
else:
raise ValueError(f'Unknown prediction type {noise_scheduler.config.prediction_type}')
if args.snr_gamma is None:
loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
else:
# Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
# Since we predict the noise instead of x_0, the original formulation is slightly changed.
# This is discussed in Section 4.2 of the same paper.
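                    # Added note (Min-SNR weighting sketch): with gamma = args.snr_gamma, the weight
                    # computed below is w_t = min(SNR(t), gamma) / SNR(t) for epsilon prediction, with
                    # SNR(t) replaced by SNR(t) + 1 for v-prediction, so high-SNR (low-noise) timesteps
                    # no longer dominate the MSE loss.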
snr = compute_snr(noise_scheduler, timesteps)
if noise_scheduler.config.prediction_type == 'v_prediction':
# Velocity objective requires that we add one to SNR values before we divide by them.
snr = snr + 1
mse_loss_weights = (
torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr)
loss = F.mse_loss(model_pred.float(), target.float(), reduction='none')
loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
loss = loss.mean()
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
train_loss += avg_loss.item() / args.gradient_accumulation_steps
# Backpropagate
accelerator.backward(loss)
if accelerator.sync_gradients:
params_to_clip = unet.parameters()
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
accelerator.log({'train_loss': train_loss}, step=global_step)
train_loss = 0.0
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith('checkpoint')]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split('-')[1]))
# before we save the new checkpoint, we need to have at _most_ \
# `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(f'{len(checkpoints)} checkpoints already exist, '
f'removing {len(removing_checkpoints)} checkpoints')
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f'checkpoint-{global_step}')
accelerator.save_state(save_path)
logger.info(f'Saved state to {save_path}')
logs = {'step_loss': loss.detach().item(), 'lr': lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
if global_step >= args.max_train_steps:
break
if accelerator.is_main_process:
if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
logger.info(f'Running validation... \n Generating {args.num_validation_images} images with prompt:'
f' {args.validation_prompt}.')
if args.use_ema:
# Store the UNet parameters temporarily and load the EMA parameters to perform inference.
ema_unet.store(unet.parameters())
ema_unet.copy_to(unet.parameters())
# create pipeline
vae = AutoencoderKL.from_pretrained(
vae_path,
subfolder='vae' if args.pretrained_vae_model_name_or_path is None else None,
revision=args.revision,
variant=args.variant,
)
pipeline = StableDiffusionXLPipeline.from_pretrained(
args.pretrained_model_name_or_path,
vae=vae,
unet=accelerator.unwrap_model(unet),
revision=args.revision,
variant=args.variant,
torch_dtype=weight_dtype,
)
if args.prediction_type is not None:
scheduler_args = {'prediction_type': args.prediction_type}
pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
pipeline_args = {'prompt': args.validation_prompt}
with torch.cuda.amp.autocast():
images = [
pipeline(**pipeline_args, generator=generator, num_inference_steps=25).images[0]
for _ in range(args.num_validation_images)
]
for tracker in accelerator.trackers:
if tracker.name == 'tensorboard':
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images('validation', np_images, epoch, dataformats='NHWC')
if tracker.name == 'wandb':
tracker.log({
'validation': [
wandb.Image(image, caption=f'{i}: {args.validation_prompt}')
for i, image in enumerate(images)
]
})
del pipeline
torch.cuda.empty_cache()
accelerator.wait_for_everyone()
if accelerator.is_main_process:
unet = accelerator.unwrap_model(unet)
if args.use_ema:
ema_unet.copy_to(unet.parameters())
# Serialize pipeline.
vae = AutoencoderKL.from_pretrained(
vae_path,
subfolder='vae' if args.pretrained_vae_model_name_or_path is None else None,
revision=args.revision,
variant=args.variant,
torch_dtype=weight_dtype,
)
pipeline = StableDiffusionXLPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=unet,
vae=vae,
revision=args.revision,
variant=args.variant,
torch_dtype=weight_dtype,
)
if args.prediction_type is not None:
scheduler_args = {'prediction_type': args.prediction_type}
pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args)
pipeline.save_pretrained(args.output_dir)
# run inference
images = []
if args.validation_prompt and args.num_validation_images > 0:
pipeline = pipeline.to(accelerator.device)
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
with torch.cuda.amp.autocast():
images = [
pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
for _ in range(args.num_validation_images)
]
for tracker in accelerator.trackers:
if tracker.name == 'tensorboard':
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images('test', np_images, epoch, dataformats='NHWC')
if tracker.name == 'wandb':
tracker.log({
'test': [
wandb.Image(image, caption=f'{i}: {args.validation_prompt}')
for i, image in enumerate(images)
]
})
if args.push_to_hub:
save_model_card(
repo_id=args.hub_model_id,
images=images,
validation_prompt=args.validation_prompt,
base_model=args.base_model_id,
dataset_name=args.dataset_name,
repo_folder=args.output_dir,
vae_path=args.vae_base_model_id,
)
push_to_hub(
args.hub_model_id,
args.output_dir,
args.hub_token,
)
accelerator.end_training()
| swift/swift/aigc/diffusers/train_text_to_image_sdxl.py/0 | {
"file_path": "swift/swift/aigc/diffusers/train_text_to_image_sdxl.py",
"repo_id": "swift",
"token_count": 23775
} | 192 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from swift.ui.app import run_ui
if __name__ == '__main__':
run_ui()
| swift/swift/cli/web_ui.py/0 | {
"file_path": "swift/swift/cli/web_ui.py",
"repo_id": "swift",
"token_count": 45
} | 193 |
# Copyright (c) Alibaba, Inc. and its affiliates.
import copy
import os
import tempfile
import threading
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from http.cookiejar import CookieJar
from pathlib import Path
from typing import Dict, Optional, Union
import requests
from requests.adapters import Retry
from tqdm import tqdm
from swift.utils.logger import get_logger
from .api import HubApi, ModelScopeConfig
from .constants import (API_FILE_DOWNLOAD_CHUNK_SIZE, API_FILE_DOWNLOAD_RETRY_TIMES, API_FILE_DOWNLOAD_TIMEOUT,
DEFAULT_MODEL_REVISION, FILE_HASH, MODELSCOPE_DOWNLOAD_PARALLELS,
MODELSCOPE_PARALLEL_DOWNLOAD_THRESHOLD_MB)
from .errors import FileDownloadError, NotExistError
from .utils.caching import ModelFileSystemCache
from .utils.utils import file_integrity_validation, get_cache_dir, get_endpoint, model_id_to_group_owner_name
logger = get_logger()
def model_file_download(
model_id: str,
file_path: str,
revision: Optional[str] = DEFAULT_MODEL_REVISION,
cache_dir: Optional[str] = None,
user_agent: Union[Dict, str, None] = None,
local_files_only: Optional[bool] = False,
cookies: Optional[CookieJar] = None,
) -> Optional[str]: # pragma: no cover
"""Download from a given URL and cache it if it's not already present in the local cache.
Given a URL, this function looks for the corresponding file in the local
cache. If it's not there, download it. Then return the path to the cached
file.
Args:
model_id (str): The model to whom the file to be downloaded belongs.
file_path(str): Path of the file to be downloaded, relative to the root of model repo.
revision(str, optional): revision of the model file to be downloaded.
Can be any of a branch, tag or commit hash.
cache_dir (str, Path, optional): Path to the folder where cached files are stored.
user_agent (dict, str, optional): The user-agent info in the form of a dictionary or a string.
local_files_only (bool, optional): If `True`, avoid downloading the file and return the path to the
            local cached file if it exists. If `False`, download the file anyway even if it exists.
cookies (CookieJar, optional): The cookie of download request.
Returns:
        str: Path to the local file or, if networking is off, the last version of the
            file cached on disk.
Raises:
        NotExistError: The file does not exist.
        ValueError: A request parameter is invalid.
Note:
Raises the following errors:
- [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
if `use_auth_token=True` and the token cannot be found.
- [`OSError`](https://docs.python.org/3/library/exceptions.html#OSError)
if ETag cannot be determined.
- [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
if some parameter value is invalid
"""
if cache_dir is None:
cache_dir = get_cache_dir()
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
temporary_cache_dir = os.path.join(cache_dir, 'temp')
os.makedirs(temporary_cache_dir, exist_ok=True)
group_or_owner, name = model_id_to_group_owner_name(model_id)
cache = ModelFileSystemCache(cache_dir, group_or_owner, name)
# if local_files_only is `True` and the file already exists in cached_path
# return the cached path
if local_files_only:
cached_file_path = cache.get_file_by_path(file_path)
if cached_file_path is not None:
logger.warning("File exists in local cache, but we're not sure it's up to date")
return cached_file_path
else:
raise ValueError('Cannot find the requested files in the cached path and outgoing'
' traffic has been disabled. To enable model look-ups and downloads'
" online, set 'local_files_only' to False.")
_api = HubApi()
headers = {'user-agent': ModelScopeConfig.get_user_agent(user_agent=user_agent, )}
if cookies is None:
cookies = ModelScopeConfig.get_cookies()
revision = _api.get_valid_revision(model_id, revision=revision, cookies=cookies)
file_to_download_info = None
# we need to confirm the version is up-to-date
# we need to get the file list to check if the latest version is cached, if so return, otherwise download
model_files = _api.get_model_files(
model_id=model_id, revision=revision, recursive=True, use_cookies=False if cookies is None else cookies)
for model_file in model_files:
if model_file['Type'] == 'tree':
continue
if model_file['Path'] == file_path:
if cache.exists(model_file):
logger.debug(f'File {model_file["Name"]} already in cache, skip downloading!')
return cache.get_file_by_info(model_file)
else:
file_to_download_info = model_file
break
if file_to_download_info is None:
        raise NotExistError('The file path: %s does not exist in: %s' % (file_path, model_id))
# we need to download again
url_to_download = get_file_download_url(model_id, file_path, revision)
temp_file_name = next(tempfile._get_candidate_names())
if MODELSCOPE_PARALLEL_DOWNLOAD_THRESHOLD_MB * 1000 * 1000 < file_to_download_info[
'Size'] and MODELSCOPE_DOWNLOAD_PARALLELS > 1:
parallel_download(
url_to_download,
temporary_cache_dir,
temp_file_name,
headers=headers,
cookies=None if cookies is None else cookies.get_dict(),
file_size=file_to_download_info['Size'])
else:
http_get_file(
url_to_download,
temporary_cache_dir,
temp_file_name,
headers=headers,
cookies=None if cookies is None else cookies.get_dict())
temp_file_path = os.path.join(temporary_cache_dir, temp_file_name)
# for download with commit we can't get Sha256
if file_to_download_info[FILE_HASH] is not None:
file_integrity_validation(temp_file_path, file_to_download_info[FILE_HASH])
return cache.put_file(file_to_download_info, os.path.join(temporary_cache_dir, temp_file_name))
def get_file_download_url(model_id: str, file_path: str, revision: str):
"""Format file download url according to `model_id`, `revision` and `file_path`.
e.g., Given `model_id=john/bert`, `revision=master`, `file_path=README.md`,
    the resulting download url is: https://modelscope.cn/api/v1/models/john/bert/repo?Revision=master&FilePath=README.md
Args:
model_id (str): The model_id.
file_path (str): File path
revision (str): File revision.
Returns:
str: The file url.
"""
download_url_template = '{endpoint}/api/v1/models/{model_id}/repo?Revision={revision}&FilePath={file_path}'
return download_url_template.format(
endpoint=get_endpoint(),
model_id=model_id,
revision=revision,
file_path=file_path,
)
def download_part(params):
# unpack parameters
progress, start, end, url, file_name, cookies, headers = params
get_headers = {} if headers is None else copy.deepcopy(headers)
get_headers['Range'] = 'bytes=%s-%s' % (start, end)
with open(file_name, 'rb+') as f:
f.seek(start)
r = requests.get(url, stream=True, headers=get_headers, cookies=cookies, timeout=API_FILE_DOWNLOAD_TIMEOUT)
for chunk in r.iter_content(chunk_size=API_FILE_DOWNLOAD_CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress.update(len(chunk))
def parallel_download(
url: str,
local_dir: str,
file_name: str,
cookies: CookieJar,
headers: Optional[Dict[str, str]] = None,
file_size: int = None,
):
# create temp file
temp_file_manager = partial(tempfile.NamedTemporaryFile, mode='wb', dir=local_dir, delete=False)
with temp_file_manager() as temp_file:
progress = tqdm(
unit='B',
unit_scale=True,
unit_divisor=1024,
total=file_size,
initial=0,
desc='Downloading',
)
        PART_SIZE = 160 * 1024 * 1024  # every part is 160M
tasks = []
for idx in range(int(file_size / PART_SIZE)):
start = idx * PART_SIZE
end = (idx + 1) * PART_SIZE - 1
tasks.append((progress, start, end, url, temp_file.name, cookies, headers))
if end + 1 < file_size:
tasks.append((progress, end + 1, file_size - 1, url, temp_file.name, cookies, headers))
parallels = MODELSCOPE_DOWNLOAD_PARALLELS if MODELSCOPE_DOWNLOAD_PARALLELS <= 4 else 4
with ThreadPoolExecutor(max_workers=parallels, thread_name_prefix='download') as executor:
list(executor.map(download_part, tasks))
progress.close()
os.replace(temp_file.name, os.path.join(local_dir, file_name))
def http_get_file(
url: str,
local_dir: str,
file_name: str,
cookies: CookieJar,
headers: Optional[Dict[str, str]] = None,
):
"""Download remote file, will retry 5 times before giving up on errors.
Args:
url(str):
actual download url of the file
local_dir(str):
local directory where the downloaded file stores
file_name(str):
name of the file stored in `local_dir`
cookies(CookieJar):
            cookies used to authenticate the user, which is required for downloading private repos
headers(Dict[str, str], optional):
http headers to carry necessary info when requesting the remote file
Raises:
FileDownloadError: File download failed.
"""
total = -1
temp_file_manager = partial(tempfile.NamedTemporaryFile, mode='wb', dir=local_dir, delete=False)
get_headers = {} if headers is None else copy.deepcopy(headers)
with temp_file_manager() as temp_file:
logger.debug('downloading %s to %s', url, temp_file.name)
# retry sleep 0.5s, 1s, 2s, 4s
retry = Retry(total=API_FILE_DOWNLOAD_RETRY_TIMES, backoff_factor=1, allowed_methods=['GET'])
while True:
try:
downloaded_size = temp_file.tell()
get_headers['Range'] = 'bytes=%d-' % downloaded_size
r = requests.get(
url, stream=True, headers=get_headers, cookies=cookies, timeout=API_FILE_DOWNLOAD_TIMEOUT)
r.raise_for_status()
content_length = r.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(
unit='B',
unit_scale=True,
unit_divisor=1024,
total=total,
initial=downloaded_size,
desc='Downloading',
)
for chunk in r.iter_content(chunk_size=API_FILE_DOWNLOAD_CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
break
            except Exception as e:  # no matter what happens, we will retry.
retry = retry.increment('GET', url, error=e)
retry.sleep()
logger.debug('storing %s in cache at %s', url, local_dir)
downloaded_length = os.path.getsize(temp_file.name)
if total != downloaded_length:
os.remove(temp_file.name)
        msg = ('File %s download incomplete, content_length: %s but the '
               'file downloaded length: %s, please download again' % (file_name, total, downloaded_length))
logger.error(msg)
raise FileDownloadError(msg)
os.replace(temp_file.name, os.path.join(local_dir, file_name))
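# Minimal usage sketch (illustrative; not part of the original module). Assumes the url
# comes from get_file_download_url above and that no cookies are needed for a public repo:
#
#     http_get_file(
#         url=get_file_download_url('john/bert', 'README.md', 'master'),
#         local_dir='/tmp/bert_cache',
#         file_name='README.md',
#         cookies=None,
#     )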
| swift/swift/hub/file_download.py/0 | {
"file_path": "swift/swift/hub/file_download.py",
"repo_id": "swift",
"token_count": 5086
} | 194 |
{
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"betas": "auto",
"eps": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupDecayLR",
"params": {
"total_num_steps": "auto",
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 3,
"offload_optimizer": {
"device": "none",
"pin_memory": true
},
"offload_param": {
"device": "none",
"pin_memory": true
},
"overlap_comm": true,
"contiguous_gradients": true,
"sub_group_size": 1e9,
"reduce_bucket_size": "auto",
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": 1e9,
"stage3_max_reuse_distance": 1e9,
"stage3_gather_16bit_weights_on_model_save": true
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
| swift/swift/llm/ds_config/zero3.json/0 | {
"file_path": "swift/swift/llm/ds_config/zero3.json",
"repo_id": "swift",
"token_count": 844
} | 195 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from typing import TYPE_CHECKING
from swift.utils.import_utils import _LazyModule
if TYPE_CHECKING:
from .arguments import Seq2SeqTrainingArguments, TrainingArguments
from .dpo_trainer import DPOTrainer
from .orpo_trainer import ORPOTrainer
from .simpo_trainer import SimPOTrainer
from .rlhf_trainers import RLHFTrainerFactory
from .trainers import Seq2SeqTrainer, Trainer
from .utils import EvaluationStrategy, FSDPOption, HPSearchBackend, HubStrategy, \
IntervalStrategy, SchedulerType, ShardedDDPOption, TrainerCallback,\
build_tokenized_answer, concat_template
else:
_import_structure = {
'arguments': ['Seq2SeqTrainingArguments', 'TrainingArguments'],
'dpo_trainer': ['DPOTrainer'],
'orpo_trainer': ['ORPOTrainer'],
'simpo_trainer': ['SimPOTrainer'],
'rlhf_trainers': ['RLHFTrainerFactory'],
'trainers': ['Seq2SeqTrainer', 'Trainer'],
'utils': [
'EvaluationStrategy', 'FSDPOption', 'HPSearchBackend', 'HubStrategy', 'IntervalStrategy', 'SchedulerType',
'ShardedDDPOption', 'TrainerCallback', 'build_tokenized_answer', 'concat_template'
]
}
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
module_spec=__spec__,
extra_objects={},
)
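    # Note (illustrative): with this lazy setup, `from swift.trainers import Trainer`
    # only imports the real `trainers` submodule on first attribute access, so plain
    # `import swift.trainers` stays lightweight; the exact behaviour depends on the
    # _LazyModule implementation.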
| swift/swift/trainers/__init__.py/0 | {
"file_path": "swift/swift/trainers/__init__.py",
"repo_id": "swift",
"token_count": 598
} | 196 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Literal, Optional
import torch
from torch import nn
from swift.utils.logger import get_logger
from .module_mapping import MODEL_KEYS_MAPPING, ModelKeys
from .utils import SwiftAdapter, SwiftConfig, SwiftOutput
logger = get_logger()
@dataclass
class LLaMAProConfig(SwiftConfig):
"""
The configuration class for the LLaMAPro module.
See https://arxiv.org/abs/2401.02415
Args:
model_type(`str`): LLaMAPro only support parts of the LLM models because of the variables need to be manually
modified.
num_new_blocks(`int`): How many new blocks need to be added
num_groups(`int`): The groups of new blocks are split to. Default equals to `num_new_blocks` which means each
single layer will be inserted into every `num_hidden_layers/num_new_blocks` original layers.
"""
model_type: str = field(
default=None, metadata={
'choices': list(MODEL_KEYS_MAPPING.keys()),
})
num_new_blocks: int = None
num_groups: Optional[int] = None
def __post_init__(self):
from .mapping import SwiftTuners
self.swift_type = SwiftTuners.LLAMAPRO
class LLaMAPro(SwiftAdapter):
@staticmethod
def prepare_model(model: nn.Module, config: LLaMAProConfig, adapter_name: str) -> SwiftOutput:
"""Prepare a model with `LLaMAProConfig`"""
num_hidden_layers = None
if hasattr(model.config, 'num_hidden_layers'):
num_hidden_layers = model.config.num_hidden_layers
elif hasattr(model.config, 'num_layers'):
num_hidden_layers = model.config.num_layers
assert num_hidden_layers is not None, 'Cannot find num of layers config'
assert num_hidden_layers % config.num_new_blocks == 0, f'Model layers {num_hidden_layers} ' \
f'should be divided by {config.num_new_blocks}'
if config.num_groups is None:
config.num_groups = config.num_new_blocks
num_stride = num_hidden_layers // config.num_groups
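        # Worked example (illustrative, not from the original source): with
        # num_hidden_layers=32 and num_new_blocks=8 (num_groups defaulting to 8),
        # num_stride == 4, so one copied block is appended after every 4 original
        # layers in the loop below, giving 32 + 8 = 40 layers in total.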
# We only support decoder only model for now.
module_list = LLaMAPro._find_module_list(config, model)
new_module_list = nn.ModuleList()
new_module_idx = []
for idx, module in enumerate(module_list):
new_module_list.append(module)
if (idx + 1) % num_stride == 0:
new_module = deepcopy(module)
new_module_list.append(new_module)
new_module_idx.append(idx + 1 + len(new_module_idx))
LLaMAPro._update_module_weight(config, new_module_list, new_module_idx)
LLaMAPro._update_module_attr(config, new_module_list)
model.config.num_hidden_layers = len(new_module_list)
LLaMAPro._set_module_list(config, model, new_module_list)
def state_dict_callback(state_dict, adapter_name):
model_key_mapping = LLaMAPro._get_model_key_mapping(config.model_type, config)
new_module_list = [model_key_mapping.module_list + f'.{i}' for i in new_module_idx]
return {
key: value
for key, value in state_dict.items() if any([m_part in key for m_part in new_module_list])
}
def mark_trainable_callback(model):
model_key_mapping = LLaMAPro._get_model_key_mapping(config.model_type, config)
new_module_list = [model_key_mapping.module_list + f'.{i}' for i in new_module_idx]
for name, parameter in model.named_parameters():
parameter: nn.Parameter
if any([m_part in name for m_part in new_module_list]):
parameter.requires_grad = True
return SwiftOutput(config, state_dict_callback, mark_trainable_callback)
@staticmethod
def _get_model_key_mapping(model_type, config) -> ModelKeys:
if model_type in MODEL_KEYS_MAPPING.keys():
model_key_mapping = MODEL_KEYS_MAPPING[model_type]
else:
model_key_mapping = config.model_key_mapping
if model_key_mapping is None:
raise ValueError(f'{model_type} is not defined in MODEL_KEYS_MAPPING, '
f'please consider pass the information through the config.model_key_mapping')
if isinstance(model_key_mapping, dict):
model_key_mapping: ModelKeys = ModelKeys(**model_key_mapping)
assert model_key_mapping.o_proj is not None and model_key_mapping.down_proj is not None, \
'LLaMAPro only support models with o_proj and down_proj components.'
return model_key_mapping
@staticmethod
def _update_module_attr(config: LLaMAProConfig, module_list):
model_type = config.model_type
model_key_mapping = LLaMAPro._get_model_key_mapping(model_type, config)
attention = model_key_mapping.attention
attention = attention.split('{}.')[1]
if model_type in ('llama', 'mistral', 'qwen2', 'yi', 'gemma', 'deepseek', 'openbuddy', 'xverse', 'orion',
'bluelm', 'ziya', 'skywork'):
for idx, module in enumerate(module_list):
getattr(module, attention).layer_idx = idx
elif model_type in ('chatglm', ):
for idx, module in enumerate(module_list):
getattr(module, attention).layer_number = idx
elif model_type in ('phi2', ):
for idx, module in enumerate(module_list):
getattr(module, attention).block_idx = idx
@staticmethod
def _update_module_weight(config: LLaMAProConfig, module_list, new_module_idx):
model_key_mapping = LLaMAPro._get_model_key_mapping(config.model_type, config)
o_proj = model_key_mapping.o_proj.split('{}.')[1]
        down_proj = model_key_mapping.down_proj.split('{}.')[1]
for idx, module in enumerate(module_list):
if idx not in new_module_idx:
continue
_o_proj: nn.Linear = module.get_submodule(o_proj)
_down_proj: nn.Linear = module.get_submodule(down_proj)
_o_proj.weight.data = torch.zeros_like(_o_proj.weight.data)
_down_proj.weight.data = torch.zeros_like(_down_proj.weight.data)
            if hasattr(_o_proj, 'bias') and _o_proj.bias is not None:
                _o_proj.bias.data = torch.zeros_like(_o_proj.bias.data)
            if hasattr(_down_proj, 'bias') and _down_proj.bias is not None:
                _down_proj.bias.data = torch.zeros_like(_down_proj.bias.data)
@staticmethod
def _set_module_list(config, module: nn.Module, module_list: nn.ModuleList):
model_key_mapping = LLaMAPro._get_model_key_mapping(config.model_type, config)
idx = model_key_mapping.module_list.rfind('.')
parent = module.get_submodule(model_key_mapping.module_list[:idx])
setattr(parent, model_key_mapping.module_list[idx + 1:], module_list)
@staticmethod
def _find_module_list(config, module: nn.Module) -> nn.ModuleList:
model_key_mapping = LLaMAPro._get_model_key_mapping(config.model_type, config)
return module.get_submodule(model_key_mapping.module_list)
@staticmethod
def activate_adapter(module: torch.nn.Module, adapter_name: str, activate: bool, offload: str = None):
for sub_module in module.modules():
if isinstance(sub_module, torch.nn.Embedding):
sub_module.nef_activated = activate
@staticmethod
def has_additional_modules():
return True
| swift/swift/tuners/llamapro.py/0 | {
"file_path": "swift/swift/tuners/llamapro.py",
"repo_id": "swift",
"token_count": 3385
} | 197 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from .rome import Rome, RomeConfig
| swift/swift/tuners/rome/__init__.py/0 | {
"file_path": "swift/swift/tuners/rome/__init__.py",
"repo_id": "swift",
"token_count": 23
} | 198 |
# Copyright (c) Alibaba, Inc. and its affiliates.
# Part of the implementation is borrowed from kmeng01/rome.
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Dict, List, Set, Tuple
import torch
import torch.nn as nn
from swift import SwiftConfig
from swift.tuners.utils import SwiftAdapter, SwiftOutput
from swift.utils import get_logger
from .compute_u import compute_u
from .compute_v import compute_v
from .context_template import context_template
from .nethook import get_parameter
from .rome_hparams import ROMEHyperParams
CONTEXT_TEMPLATES_CACHE = None
logger = get_logger()
@dataclass
class RomeConfig(SwiftConfig):
"""
The configuration class for the ROME module.
This adapter can be used to inject/modify knowledge to models, without any training.
ROME: [Rank-One Editing of Encoder-Decoder Models](https://arxiv.org/abs/2211.13317)
Args:
model_type(`str`): The model type, now support llama-7b/llama-13b
tokenizer(`AutoTokenizer`): The tokenizer
knowledge(`List[Dict]`): The knowledge to be injected to the model.
format:
>>> [
>>> {
>>> "prompt": "{} was the founder of",
>>> "subject": "Steve Jobs",
>>> "target": "Microsoft"
>>> }
>>> ]
"""
model_type: str = field(default=None, metadata={'help': 'The model type'})
tokenizer: Any = field(default=None, metadata={'help': 'The tokenizer matching this model'})
    knowledge: List[Dict] = field(default=None, metadata={'help': 'The knowledge to be used'})
batch_first: bool = field(default=True, metadata={'help': 'Batch at the first dimension or not'})
def __post_init__(self):
from swift.tuners.mapping import SwiftTuners
self.swift_type = SwiftTuners.ROME
@property
def __dict__(self):
_dict = super(RomeConfig, self).__dict__
_dict.pop('tokenizer')
return _dict
class Rome(SwiftAdapter):
@staticmethod
def prepare_model(model: nn.Module, config: RomeConfig, adapter_name: str):
"""
Applies the selected model editing algorithm. Generates text both before and after
for comparison of model behavior. Returns the updated model and the original values of
weights that were changed.
"""
modified_keys = set()
if config.tokenizer is not None:
for param in model.parameters():
param.requires_grad = True
hparams = ROMEHyperParams.from_name(config.model_type)
modified_keys = apply_rome_to_model(model, config.tokenizer, config.knowledge, hparams, config.batch_first)
def state_dict_callback(state_dict, adapter_name):
return {key: value for key, value in state_dict.items() if key in modified_keys}
def mark_trainable_callback(model):
pass
return SwiftOutput(config, state_dict_callback, mark_trainable_callback)
@staticmethod
def has_additional_modules():
return False
def apply_rome_to_model(
model: torch.nn.Module,
tokenizer: Any,
knowledge: List[Dict],
hparams: ROMEHyperParams,
batch_first: bool,
) -> Set:
"""Apply ROME to a model
Args:
model(`torch.nn.Module`): The model instance.
tokenizer(`Any`): The tokenizer.
knowledge(`List[Dict]`): The knowledge to be filled into the model.
hparams(`ROMEHyperParams`): The hyperparameter of ROME
batch_first(`bool`): Batch first of not.
"""
modified_keys = set()
for i, request in enumerate(knowledge):
deltas = execute_rome(model, tokenizer, request, hparams, batch_first)
with torch.no_grad():
for w_name, (delta_u, delta_v) in deltas.items():
upd_matrix = delta_u.unsqueeze(1) @ delta_v.unsqueeze(0)
w = get_parameter(model, w_name)
upd_matrix = upd_matrix_match_shape(upd_matrix, w.shape)
w[...] += upd_matrix
modified_keys.update(set(deltas.keys()))
return modified_keys
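# Shape note (illustrative): delta_u and delta_v are 1-D vectors, so the outer product
# delta_u.unsqueeze(1) @ delta_v.unsqueeze(0) is a rank-one matrix; upd_matrix_match_shape
# then transposes it if needed (GPT-2/GPT-J style weights) so it can be added to the
# edited weight in place.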
def execute_rome(
model: torch.nn.Module,
tok: Any,
knowledge: Dict,
hparams: ROMEHyperParams,
batch_first: bool,
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
"""
Executes the ROME update algorithm for the specified update at the specified layer
Invariant: model at beginning of function == model at end of function
"""
# Update target and print info
request = deepcopy(knowledge)
logger.info(f'Executing ROME algorithm for the update: '
f"[{request['prompt'].format(request['subject'])}] -> [{request['target']}]")
# Retrieve weights that user desires to change
weights = {
f'{hparams.rewrite_module_tmp.format(layer)}.weight':
get_parameter(model, f'{hparams.rewrite_module_tmp.format(layer)}.weight')
for layer in hparams.layers
}
# Save old weights for future restoration
weights_copy = {k: v.detach().clone() for k, v in weights.items()}
# Update loop: sequentially intervene at each specified layer
deltas = {}
for layer in sorted(hparams.layers):
# Compute rank-1 update matrix
left_vector: torch.Tensor = compute_u(
model,
tok,
request,
hparams,
layer,
context_template,
batch_first=batch_first,
)
logger.info(f'Left vector shape: {left_vector.shape}')
right_vector: torch.Tensor = compute_v(
model,
tok,
request,
hparams,
layer,
left_vector,
context_template,
batch_first=batch_first,
)
logger.info(f'Right vector shape: {right_vector.shape}')
right_vector = right_vector.to(left_vector.dtype)
with torch.no_grad():
# Determine correct transposition of delta matrix
weight_name = f'{hparams.rewrite_module_tmp.format(layer)}.weight'
upd_matrix = left_vector.unsqueeze(1) @ right_vector.unsqueeze(0)
upd_matrix = upd_matrix_match_shape(upd_matrix, weights[weight_name].shape)
# Update model weights and record desired changes in `delta` variable
weights[weight_name][...] += upd_matrix
deltas[weight_name] = (
left_vector.detach(),
right_vector.detach(),
)
# Restore state of original model
with torch.no_grad():
for k, v in weights.items():
v[...] = weights_copy[k]
logger.info(f'Deltas successfully computed for {list(weights.keys())}')
return deltas
def upd_matrix_match_shape(matrix: torch.Tensor, shape: torch.Size) -> torch.Tensor:
"""
GPT-2 and GPT-J have transposed weight representations.
Returns a matrix that matches the desired shape, else raises a ValueError
"""
if matrix.shape == shape:
return matrix
elif matrix.T.shape == shape:
return matrix.T
else:
raise ValueError('Update matrix computed by ROME does not match original weight shape. '
'Check for bugs in the code?')
| swift/swift/tuners/rome/rome.py/0 | {
"file_path": "swift/swift/tuners/rome/rome.py",
"repo_id": "swift",
"token_count": 3021
} | 199 |
from typing import Type
import gradio as gr
from swift.llm import DATASET_MAPPING
from swift.ui.base import BaseUI
class Export(BaseUI):
group = 'llm_export'
locale_dict = {
'merge_lora': {
'label': {
'zh': '合并lora',
'en': 'Merge lora'
},
'info': {
'zh':
'lora合并的路径在填入的checkpoint同级目录,请查看运行时log获取更具体的信息',
'en':
'The output path is in the sibling directory as the input checkpoint. '
'Please refer to the runtime log for more specific information.'
},
},
'merge_device_map': {
'label': {
'zh': '合并lora使用的device_map',
'en': 'The device_map when merge-lora'
},
'info': {
'zh': '如果显存不够请填入cpu',
'en': 'If GPU memory is not enough, fill in cpu'
},
},
'quant_bits': {
'label': {
'zh': '量化比特数',
'en': 'Quantize bits'
},
},
'quant_method': {
'label': {
'zh': '量化方法',
'en': 'Quantize method'
},
},
'quant_n_samples': {
'label': {
'zh': '量化集采样数',
'en': 'Sampled rows from calibration dataset'
},
},
'quant_seqlen': {
'label': {
'zh': '量化集的max-length',
'en': 'The quantize sequence length'
},
},
'quant_output_dir': {
'label': {
'zh': '量化输出路径',
'en': 'Output dir for quantization'
},
'info': {
'zh':
'如果仅merge-lora不需要修改这里, 留空时量化输出在当前目录的<model-type>-<quant_method>-<quant_bits>下',
'en':
'If only merging LoRA, no need to modify this. When left blank, '
'the output will be in the current directory under <model-type>-<quant_method>-<quant_bits>'
},
},
'dataset': {
'label': {
'zh': '校准数据集',
'en': 'Calibration datasets'
},
},
}
@classmethod
def do_build_ui(cls, base_tab: Type['BaseUI']):
with gr.Row():
gr.Checkbox(elem_id='merge_lora', scale=10)
gr.Textbox(elem_id='merge_device_map', scale=20)
with gr.Row():
gr.Textbox(elem_id='quant_bits', scale=20)
gr.Dropdown(elem_id='quant_method', scale=20)
gr.Textbox(elem_id='quant_n_samples', scale=20)
gr.Textbox(elem_id='quant_seqlen', scale=20)
with gr.Row():
gr.Textbox(elem_id='quant_output_dir', scale=20)
gr.Dropdown(elem_id='dataset', multiselect=True, choices=list(DATASET_MAPPING.keys()), scale=20)
| swift/swift/ui/llm_export/export.py/0 | {
"file_path": "swift/swift/ui/llm_export/export.py",
"repo_id": "swift",
"token_count": 1850
} | 200 |
import os
from typing import Type
import gradio as gr
from swift.llm import DATASET_MAPPING
from swift.ui.base import BaseUI
class Dataset(BaseUI):
group = 'llm_train'
locale_dict = {
'dataset': {
'label': {
'zh': '数据集名称',
'en': 'Dataset Code'
},
'info': {
'zh': '选择训练的数据集,支持复选',
'en': 'The dataset(s) to train the models'
}
},
'max_length': {
'label': {
'zh': '句子最大长度',
'en': 'The max length',
},
'info': {
'zh': '设置输入模型的最大长度',
'en': 'Set the max length input to the model',
}
},
'custom_train_dataset_path': {
'label': {
'zh': '自定义训练数据集路径',
'en': 'Custom train dataset path'
},
'info': {
'zh': '输入自定义的训练数据集路径,空格分隔',
'en': 'Extra train files, split by blank'
}
},
'custom_val_dataset_path': {
'label': {
'zh': '自定义校验数据集路径',
'en': 'Custom val dataset path'
},
'info': {
'zh': '输入自定义的校验数据集路径,逗号分隔',
'en': 'Extra val files, split by comma'
}
},
'dataset_test_ratio': {
'label': {
'zh': '验证集拆分比例',
'en': 'Split ratio of eval dataset'
},
'info': {
'zh': '表示将总数据的多少拆分到验证集中',
'en': 'Split the datasets by this ratio for eval'
}
},
'train_dataset_sample': {
'label': {
'zh': '训练集采样数量',
'en': 'The sample size from the train dataset'
},
'info': {
'zh': '从训练集中采样一定行数进行训练',
'en': 'Train with the sample size from the dataset',
}
},
'val_dataset_sample': {
'label': {
'zh': '验证集采样数量',
'en': 'The sample size from the val dataset'
},
'info': {
'zh': '从验证集中采样一定行数进行训练',
'en': 'Validate with the sample size from the dataset',
}
},
'truncation_strategy': {
'label': {
'zh': '数据集超长策略',
'en': 'Dataset truncation strategy'
},
'info': {
'zh': '如果token超长该如何处理',
'en': 'How to deal with the rows exceed the max length'
}
},
'custom_dataset_info': {
'label': {
'zh': '外部数据集配置',
'en': 'Custom dataset config'
},
'info': {
'zh': '注册外部数据集的配置文件',
'en': 'An extra dataset config to register your own datasets'
}
},
}
@classmethod
def do_build_ui(cls, base_tab: Type['BaseUI']):
with gr.Row():
gr.Dropdown(elem_id='dataset', multiselect=True, choices=list(DATASET_MAPPING.keys()), scale=20)
gr.Textbox(elem_id='custom_dataset_info', is_list=False, scale=20)
gr.Textbox(elem_id='custom_train_dataset_path', is_list=True, scale=20)
gr.Textbox(elem_id='custom_val_dataset_path', is_list=True, scale=20)
with gr.Row():
gr.Slider(elem_id='dataset_test_ratio', minimum=0.0, maximum=1.0, step=0.05, scale=20)
gr.Slider(elem_id='max_length', minimum=32, maximum=32768, step=32, scale=20)
gr.Textbox(elem_id='train_dataset_sample', scale=20)
gr.Textbox(elem_id='val_dataset_sample', scale=20)
gr.Dropdown(elem_id='truncation_strategy', scale=20)
| swift/swift/ui/llm_train/dataset.py/0 | {
"file_path": "swift/swift/ui/llm_train/dataset.py",
"repo_id": "swift",
"token_count": 2582
} | 201 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from typing import Dict, Literal
import numpy as np
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
from rouge.rouge import Rouge
from torch import Tensor
from transformers.trainer_utils import EvalPrediction
from .logger import get_logger
logger = get_logger()
def compute_nlg_metrics(prediction, tokenizer):
import jieba
preds, labels = prediction[0], prediction[1]
score_dict = {'rouge-1': [], 'rouge-2': [], 'rouge-l': [], 'bleu-4': []}
def _decode(tokens, ignore_pad_token_for_loss=False):
if ignore_pad_token_for_loss:
tokens = np.where(tokens != -100, tokens, tokenizer.pad_token_id)
tokens = np.where(tokens < tokenizer.vocab_size, tokens, tokenizer.pad_token_id)
return [t for t in tokenizer.batch_decode(tokens, skip_special_tokens=True)]
for pred, label in zip(preds, labels):
pred = ''.join(_decode(pred, False))
label = ''.join(_decode(label, True))
hypothesis = list(jieba.cut(pred))
if len(hypothesis) == 0 or ''.join(hypothesis) == '.':
hypothesis = [tokenizer.decode(tokenizer.eos_token_id)]
reference = list(jieba.cut(label))
try:
rouge = Rouge()
scores = rouge.get_scores(' '.join(hypothesis), ' '.join(reference))
result = scores[0]
for k, v in result.items():
score_dict[k].append(round(v['f'] * 100, 4))
bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3)
score_dict['bleu-4'].append(round(bleu_score * 100, 4))
except Exception as e:
logger.error(e)
logger.error(f'eval error {hypothesis}, {reference}')
for k, v in score_dict.items():
score_dict[k] = float(np.mean(v))
return score_dict
def compute_acc_metrics(eval_prediction: EvalPrediction,
acc_strategy: Literal['token', 'sentence'] = 'token') -> Dict[str, Tensor]:
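    # The one-position shift below aligns causal-LM predictions with their targets:
    # preprocess_logits_for_metrics returns the argmax at each position, which predicts
    # the *next* token, so predictions[..., :-1] is compared against labels[..., 1:].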
labels = eval_prediction.label_ids[..., 1:]
predictions = eval_prediction.predictions[..., :-1]
if predictions.shape != labels.shape:
return {}
masks = labels != -100
if acc_strategy == 'sentence':
acc_list = []
for i, m in enumerate(masks):
acc_list.append(np.all(predictions[i, m] == labels[i, m]))
acc = np.mean(np.array(acc_list))
else:
acc = np.mean((predictions[masks] == labels[masks]).astype(np.float64))
return {'acc': acc}
def preprocess_logits_for_metrics(logits: Tensor, labels: Tensor) -> Tensor:
if isinstance(logits, (list, tuple)):
logits = logits[0]
preds = logits.argmax(dim=-1)
return preds
| swift/swift/utils/metric.py/0 | {
"file_path": "swift/swift/utils/metric.py",
"repo_id": "swift",
"token_count": 1203
} | 202 |
import os
import unittest
import torch
from swift.llm.utils import *
from swift.utils import lower_bound, seed_everything
SKIP_TEST = True
class TestVllmUtils(unittest.TestCase):
    @unittest.skipIf(SKIP_TEST, 'To avoid citest error: OOM')
def test_inference_vllm(self):
model_type = ModelType.qwen_7b_chat
llm_engine = get_vllm_engine(model_type, torch.float16)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
request_list = [{'query': '浙江的省会在哪?'}, {'query': '你好!'}]
# test inference_vllm
response_list = inference_vllm(llm_engine, template, request_list, verbose=True)
for response in response_list:
print(response)
# test inference_stream_vllm
gen = inference_stream_vllm(llm_engine, template, request_list)
for response_list in gen:
print(response_list[0]['response'], response_list[0]['history'])
print(response_list[1]['response'], response_list[1]['history'])
if __name__ == '__main__':
unittest.main()
| swift/tests/llm/test_vllm_utils.py/0 | {
"file_path": "swift/tests/llm/test_vllm_utils.py",
"repo_id": "swift",
"token_count": 492
} | 203 |
import os
import shutil
import tempfile
import unittest
from swift.utils import append_to_jsonl, get_logger, read_from_jsonl, write_to_jsonl
logger = get_logger()
class TestIOUtils(unittest.TestCase):
def setUp(self):
self._tmp_dir = tempfile.TemporaryDirectory()
self.tmp_dir = self._tmp_dir.name
# self.tmp_dir = 'test'
logger.info(f'self.tmp_dir: {self.tmp_dir}')
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_jsonl(self):
fpath = os.path.join(self.tmp_dir, '1.jsonl')
obj_list = [{'aaa': 'bbb'}, 111, [1.1]]
write_to_jsonl(fpath, obj_list)
new_obj = {'bbb': 'aaa'}
obj_list.append(new_obj)
append_to_jsonl(fpath, new_obj)
new_obj_list = read_from_jsonl(fpath)
self.assertTrue(new_obj_list == obj_list)
def test_jsonl2(self):
fpath = os.path.join(self.tmp_dir, '1.jsonl')
obj_list = [{'aaa': 'bbb'}, 111, [1.1]]
for obj in obj_list:
append_to_jsonl(fpath, obj)
new_obj_list = read_from_jsonl(fpath)
self.assertTrue(new_obj_list == obj_list)
if __name__ == '__main__':
unittest.main()
| swift/tests/utils/test_io_utils.py/0 | {
"file_path": "swift/tests/utils/test_io_utils.py",
"repo_id": "swift",
"token_count": 582
} | 204 |
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module> | .idea/wensimin-work.iml/0 | {
"file_path": ".idea/wensimin-work.iml",
"repo_id": ".idea",
"token_count": 105
} | 0 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="AutoImportSettings">
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="0f53c228-02fe-4b6e-987c-28c3d984fda3" name="更改" comment="" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component>
<component name="ProjectColorInfo">{
"associatedIndex": 0
}</component>
<component name="ProjectId" id="2iRYaNzXOl5qqEXRnjnwiV4IurJ" />
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">{
"keyToString": {
"RunOnceActivity.ShowReadmeOnStart": "true",
"git-widget-placeholder": "main",
"node.js.detected.package.eslint": "true",
"node.js.detected.package.tslint": "true",
"node.js.selected.package.eslint": "(autodetect)",
"node.js.selected.package.tslint": "(autodetect)",
"nodejs_package_manager_path": "npm",
"vue.rearranger.settings.migration": "true"
}
}</component>
<component name="RdControllerToolWindowsLayoutState" isNewUi="true">
<layout>
<window_info id="Bookmarks" show_stripe_button="false" side_tool="true" />
<window_info id="Merge Requests" show_stripe_button="false" />
<window_info id="Commit_Guest" show_stripe_button="false" />
<window_info id="Pull Requests" show_stripe_button="false" />
<window_info id="Learn" show_stripe_button="false" />
<window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.2928487" />
<window_info id="Commit" order="1" weight="0.25" />
<window_info id="Structure" order="2" side_tool="true" weight="0.25" />
<window_info anchor="bottom" id="Database Changes" show_stripe_button="false" />
<window_info anchor="bottom" id="TypeScript" show_stripe_button="false" />
<window_info anchor="bottom" id="TODO" show_stripe_button="false" />
<window_info anchor="bottom" id="File Transfer" show_stripe_button="false" />
<window_info anchor="bottom" id="Version Control" order="0" />
<window_info anchor="bottom" id="Problems" order="1" />
<window_info anchor="bottom" id="Problems View" order="2" />
<window_info active="true" anchor="bottom" id="Terminal" order="3" visible="true" weight="0.32690406" />
<window_info anchor="bottom" id="Services" order="4" />
<window_info anchor="bottom" id="Python Packages" order="5" weight="0.1" />
<window_info anchor="bottom" id="Python Console" order="6" weight="0.1" />
<window_info anchor="right" id="Endpoints" show_stripe_button="false" />
<window_info anchor="right" id="Coverage" show_stripe_button="false" side_tool="true" />
<window_info anchor="right" id="SciView" show_stripe_button="false" />
<window_info anchor="right" content_ui="combo" id="Notifications" order="0" weight="0.25" />
<window_info anchor="right" id="AIAssistant" order="1" weight="0.25" />
<window_info anchor="right" id="Database" order="2" weight="0.25" />
<window_info anchor="right" id="Gradle" order="3" weight="0.25" />
<window_info anchor="right" id="Maven" order="4" weight="0.25" />
<window_info anchor="right" id="Plots" order="5" weight="0.1" />
<window_info anchor="right" id="Translation.Wordbook" order="6" show_stripe_button="false" side_tool="true" />
</layout>
</component>
<component name="SharedIndexes">
<attachedChunks>
<set>
<option value="bundled-js-predefined-1d06a55b98c1-74d2a5396914-JavaScript-PY-241.14494.241" />
<option value="bundled-python-sdk-0509580d9d50-28c9f5db9ffe-com.jetbrains.pycharm.pro.sharedIndexes.bundled-PY-241.14494.241" />
</set>
</attachedChunks>
</component>
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="应用程序级" UseSingleDictionary="true" transferred="true" />
<component name="TaskManager">
<task active="true" id="Default" summary="默认任务">
<changelist id="0f53c228-02fe-4b6e-987c-28c3d984fda3" name="更改" comment="" />
<created>1719454919444</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1719454919444</updated>
<workItem from="1719454920494" duration="7322000" />
<workItem from="1719562901843" duration="3485000" />
<workItem from="1719796090889" duration="2454000" />
<workItem from="1719974270498" duration="6495000" />
</task>
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="3" />
</component>
</project> | LLaMA-Factory/.idea/workspace.xml/0 | {
"file_path": "LLaMA-Factory/.idea/workspace.xml",
"repo_id": "LLaMA-Factory",
"token_count": 2142
} | 1 |
The [dataset_info.json](dataset_info.json) contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training to use it.
Currently we support datasets in **alpaca** and **sharegpt** format.
```json
"dataset_name": {
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url and file_name)",
"ms_hub_url": "the name of the dataset repository on the Model Scope hub. (if specified, ignore script_url and file_name)",
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name)",
"file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
"ranking": "whether the dataset is a preference dataset or not. (default: False)",
"subset": "the name of the subset. (optional, default: None)",
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
"num_samples": "the number of samples in the dataset used for training. (optional, default: None)",
"columns (optional)": {
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
"query": "the column name in the dataset containing the queries. (default: input)",
"response": "the column name in the dataset containing the responses. (default: output)",
"history": "the column name in the dataset containing the histories. (default: None)",
"messages": "the column name in the dataset containing the messages. (default: conversations)",
"system": "the column name in the dataset containing the system prompts. (default: None)",
"tools": "the column name in the dataset containing the tool description. (default: None)",
"images": "the column name in the dataset containing the image inputs. (default: None)",
"chosen": "the column name in the dataset containing the chosen answers. (default: None)",
"rejected": "the column name in the dataset containing the rejected answers. (default: None)",
"kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
},
"tags (optional, used for the sharegpt format)": {
"role_tag": "the key in the message represents the identity. (default: from)",
"content_tag": "the key in the message represents the content. (default: value)",
"user_tag": "the value of the role_tag represents the user. (default: human)",
"assistant_tag": "the value of the role_tag represents the assistant. (default: gpt)",
"observation_tag": "the value of the role_tag represents the tool results. (default: observation)",
"function_tag": "the value of the role_tag represents the function call. (default: function_call)",
"system_tag": "the value of the role_tag represents the system prompt. (default: system, can override system column)"
}
}
```
## Alpaca Format
### Supervised Fine-Tuning Dataset
* [Example dataset](alpaca_en_demo.json)
In supervised fine-tuning, the `instruction` column is concatenated with the `input` column and used as the human prompt, i.e. the human prompt is `instruction\ninput`. The `output` column contains the model response.
The `system` column will be used as the system prompt if specified.
The `history` column is a list consisting of string tuples representing prompt-response pairs in the history messages. Note that the responses in the history **will also be learned by the model** in supervised fine-tuning.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"output": "model response (required)",
"system": "system prompt (optional)",
"history": [
["human instruction in the first round (optional)", "model response in the first round (optional)"],
["human instruction in the second round (optional)", "model response in the second round (optional)"]
]
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "instruction",
"query": "input",
"response": "output",
"system": "system",
"history": "history"
}
}
```
### Pre-training Dataset
- [Example dataset](c4_demo.json)
In pre-training, only the `text` column will be used for model learning.
```json
[
{"text": "document"},
{"text": "document"}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "text"
}
}
```
### Preference Dataset
Preference datasets are used for reward modeling, DPO training and ORPO training.
They require a better response in the `chosen` column and a worse response in the `rejected` column.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"chosen": "chosen answer (required)",
"rejected": "rejected answer (required)"
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"ranking": true,
"columns": {
"prompt": "instruction",
"query": "input",
"chosen": "chosen",
"rejected": "rejected"
}
}
```
### KTO Dataset
- [Example dataset](kto_en_demo.json)
KTO datasets require an extra `kto_tag` column containing the boolean human feedback.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"output": "model response (required)",
"kto_tag": "human feedback [true/false] (required)"
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "instruction",
"query": "input",
"response": "output",
"kto_tag": "kto_tag"
}
}
```
### Multimodal Dataset
- [Example dataset](mllm_demo.json)
Multimodal datasets require an `images` column containing the paths to the input images. Currently we only support one image.
```json
[
{
"instruction": "human instruction (required)",
"input": "human input (optional)",
"output": "model response (required)",
"images": [
"image path (required)"
]
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"columns": {
"prompt": "instruction",
"query": "input",
"response": "output",
"images": "images"
}
}
```
## Sharegpt Format
### Supervised Fine-Tuning Dataset
- [Example dataset](glaive_toolcall_en_demo.json)
Compared to the alpaca format, the sharegpt format allows the dataset to have **more roles**, such as human, gpt, observation and function. They are presented as a list of objects in the `conversations` column.
Note that the human and observation should appear in odd positions, while gpt and function should appear in even positions.
```json
[
{
"conversations": [
{
"from": "human",
"value": "human instruction"
},
{
"from": "function_call",
"value": "tool arguments"
},
{
"from": "observation",
"value": "tool result"
},
{
"from": "gpt",
"value": "model response"
}
],
"system": "system prompt (optional)",
"tools": "tool description (optional)"
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"formatting": "sharegpt",
"columns": {
"messages": "conversations",
"system": "system",
"tools": "tools"
}
}
```
### Preference Dataset
- [Example dataset](dpo_en_demo.json)
Preference datasets in sharegpt format also require a better message in the `chosen` column and a worse message in the `rejected` column.
```json
[
{
"conversations": [
{
"from": "human",
"value": "human instruction"
},
{
"from": "gpt",
"value": "model response"
},
{
"from": "human",
"value": "human instruction"
}
],
"chosen": {
"from": "gpt",
"value": "chosen answer (required)"
},
"rejected": {
"from": "gpt",
"value": "rejected answer (required)"
}
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"formatting": "sharegpt",
"ranking": true,
"columns": {
"messages": "conversations",
"chosen": "chosen",
"rejected": "rejected"
}
}
```
### OpenAI Format
The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.
```json
[
{
"messages": [
{
"role": "system",
"content": "system prompt (optional)"
},
{
"role": "user",
"content": "human instruction"
},
{
"role": "assistant",
"content": "model response"
}
]
}
]
```
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
```json
"dataset_name": {
"file_name": "data.json",
"formatting": "sharegpt",
"columns": {
"messages": "messages"
},
"tags": {
"role_tag": "role",
"content_tag": "content",
"user_tag": "user",
"assistant_tag": "assistant",
"system_tag": "system"
}
}
```
The KTO datasets and multimodal datasets in sharegpt format are similar to the alpaca format.
Pre-training datasets are **incompatible** with the sharegpt format.
| LLaMA-Factory/data/README.md/0 | {
"file_path": "LLaMA-Factory/data/README.md",
"repo_id": "LLaMA-Factory",
"token_count": 3465
} | 2 |
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
### method
stage: rm
do_train: true
finetuning_type: lora
lora_target: all
### dataset
dataset: dpo_en_demo
template: llama3
cutoff_len: 1024
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/reward
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
fp16: true
ddp_timeout: 180000000
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
| LLaMA-Factory/examples/train_lora/llama3_lora_reward.yaml/0 | {
"file_path": "LLaMA-Factory/examples/train_lora/llama3_lora_reward.yaml",
"repo_id": "LLaMA-Factory",
"token_count": 282
} | 3 |
---
base_model: /home/ubuntu/.cache/modelscope/hub/Qwen/Qwen2-0___5B
library_name: peft
license: other
tags:
- llama-factory
- lora
- generated_from_trainer
model-index:
- name: train_2024-06-27-07-03-48
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# train_2024-06-27-07-03-48
This model is a fine-tuned version of [/home/ubuntu/.cache/modelscope/hub/Qwen/Qwen2-0___5B](https://huggingface.co//home/ubuntu/.cache/modelscope/hub/Qwen/Qwen2-0___5B) on the alpaca_gpt4_zh dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 2
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- num_epochs: 3.0
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- PEFT 0.11.1
- Transformers 4.41.2
- Pytorch 2.3.1+cu121
- Datasets 2.18.0
- Tokenizers 0.19.1 | LLaMA-Factory/saves/Qwen2-0.5B/lora/train_2024-06-27-07-03-48/README.md/0 | {
"file_path": "LLaMA-Factory/saves/Qwen2-0.5B/lora/train_2024-06-27-07-03-48/README.md",
"repo_id": "LLaMA-Factory",
"token_count": 465
} | 4 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uvicorn
from llamafactory.api.app import create_app
from llamafactory.chat import ChatModel
def main():
chat_model = ChatModel()
app = create_app(chat_model)
api_host = os.environ.get("API_HOST", "0.0.0.0")
api_port = int(os.environ.get("API_PORT", "8000"))
print("Visit http://localhost:{}/docs for API document.".format(api_port))
uvicorn.run(app, host=api_host, port=api_port)
if __name__ == "__main__":
main()
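# Example launch (illustrative): `API_PORT=8000 python src/api.py`, then open
# http://localhost:8000/docs for the interactive API documentation mentioned above.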
| LLaMA-Factory/src/api.py/0 | {
"file_path": "LLaMA-Factory/src/api.py",
"repo_id": "LLaMA-Factory",
"token_count": 338
} | 5 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .collator import KTODataCollatorWithPadding, PairwiseDataCollatorWithPadding
from .data_utils import Role, split_dataset
from .loader import get_dataset
from .template import TEMPLATES, Template, get_template_and_fix_tokenizer
__all__ = [
"KTODataCollatorWithPadding",
"PairwiseDataCollatorWithPadding",
"Role",
"split_dataset",
"get_dataset",
"TEMPLATES",
"Template",
"get_template_and_fix_tokenizer",
]
| LLaMA-Factory/src/llamafactory/data/__init__.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/data/__init__.py",
"repo_id": "LLaMA-Factory",
"token_count": 322
} | 6 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import TYPE_CHECKING, Callable, Literal, Optional, Tuple
from .processors.feedback import preprocess_feedback_dataset
from .processors.pairwise import preprocess_pairwise_dataset, print_pairwise_dataset_example
from .processors.pretrain import preprocess_pretrain_dataset
from .processors.supervised import (
preprocess_packed_supervised_dataset,
preprocess_supervised_dataset,
print_supervised_dataset_example,
)
from .processors.unsupervised import preprocess_unsupervised_dataset, print_unsupervised_dataset_example
if TYPE_CHECKING:
from transformers import PreTrainedTokenizer, ProcessorMixin, Seq2SeqTrainingArguments
from ..hparams import DataArguments
from .template import Template
def get_preprocess_and_print_func(
data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
stage: Literal["pt", "sft", "rm", "ppo", "kto"],
template: "Template",
tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"],
) -> Tuple[Callable, Callable]:
if stage == "pt":
preprocess_func = partial(
preprocess_pretrain_dataset,
tokenizer=tokenizer,
data_args=data_args,
)
print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer)
elif stage == "sft" and not training_args.predict_with_generate:
if data_args.packing:
preprocess_func = partial(
preprocess_packed_supervised_dataset,
template=template,
tokenizer=tokenizer,
data_args=data_args,
)
else:
preprocess_func = partial(
preprocess_supervised_dataset,
template=template,
tokenizer=tokenizer,
processor=processor,
data_args=data_args,
)
print_function = partial(print_supervised_dataset_example, tokenizer=tokenizer)
elif stage == "rm":
preprocess_func = partial(
preprocess_pairwise_dataset,
template=template,
tokenizer=tokenizer,
processor=processor,
data_args=data_args,
)
print_function = partial(print_pairwise_dataset_example, tokenizer=tokenizer)
elif stage == "kto":
preprocess_func = partial(
preprocess_feedback_dataset,
template=template,
tokenizer=tokenizer,
processor=processor,
data_args=data_args,
)
print_function = partial(print_supervised_dataset_example, tokenizer=tokenizer)
else:
preprocess_func = partial(
preprocess_unsupervised_dataset,
template=template,
tokenizer=tokenizer,
processor=processor,
data_args=data_args,
)
print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer)
return preprocess_func, print_function
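# Usage note (illustrative assumption about the caller): the returned preprocess_func is
# expected to be passed to `datasets.Dataset.map(..., batched=True)` by the dataset
# loader, while print_function prints a single processed example for debugging.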
| LLaMA-Factory/src/llamafactory/data/preprocess.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/data/preprocess.py",
"repo_id": "LLaMA-Factory",
"token_count": 1470
} | 7 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict, defaultdict
from enum import Enum
from typing import Dict, Optional
from peft.utils import SAFETENSORS_WEIGHTS_NAME as SAFE_ADAPTER_WEIGHTS_NAME
from peft.utils import WEIGHTS_NAME as ADAPTER_WEIGHTS_NAME
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME
CHECKPOINT_NAMES = {
SAFE_ADAPTER_WEIGHTS_NAME,
ADAPTER_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
}
CHOICES = ["A", "B", "C", "D"]
DATA_CONFIG = "dataset_info.json"
DEFAULT_TEMPLATE = defaultdict(str)
FILEEXT2TYPE = {
"arrow": "arrow",
"csv": "csv",
"json": "json",
"jsonl": "json",
"parquet": "parquet",
"txt": "text",
}
IGNORE_INDEX = -100
LAYERNORM_NAMES = {"norm", "ln"}
LLAMABOARD_CONFIG = "llamaboard_config.yaml"
METHODS = ["full", "freeze", "lora"]
MOD_SUPPORTED_MODELS = {"bloom", "falcon", "gemma", "llama", "mistral", "mixtral", "phi", "starcoder2"}
PEFT_METHODS = {"lora"}
RUNNING_LOG = "running_log.txt"
SUBJECTS = ["Average", "STEM", "Social Sciences", "Humanities", "Other"]
SUPPORTED_MODELS = OrderedDict()
TRAINER_LOG = "trainer_log.jsonl"
TRAINING_ARGS = "training_args.yaml"
TRAINING_STAGES = {
"Supervised Fine-Tuning": "sft",
"Reward Modeling": "rm",
"PPO": "ppo",
"DPO": "dpo",
"KTO": "kto",
"Pre-Training": "pt",
}
STAGES_USE_PAIR_DATA = {"rm", "dpo"}
SUPPORTED_CLASS_FOR_S2ATTN = {"llama"}
V_HEAD_WEIGHTS_NAME = "value_head.bin"
V_HEAD_SAFE_WEIGHTS_NAME = "value_head.safetensors"
VISION_MODELS = set()
class DownloadSource(str, Enum):
DEFAULT = "hf"
MODELSCOPE = "ms"
def register_model_group(
models: Dict[str, Dict[DownloadSource, str]],
template: Optional[str] = None,
vision: bool = False,
) -> None:
prefix = None
for name, path in models.items():
if prefix is None:
prefix = name.split("-")[0]
else:
assert prefix == name.split("-")[0], "prefix should be identical."
SUPPORTED_MODELS[name] = path
if template is not None:
DEFAULT_TEMPLATE[prefix] = template
if vision:
VISION_MODELS.add(prefix)
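# Example (taken from the registrations below): "Baichuan2-7B-Base" and
# "Baichuan2-13B-Chat" share the prefix "Baichuan2", so they can be registered in a
# single register_model_group call that maps that prefix to the "baichuan2" template.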
register_model_group(
models={
"Aya-23-8B-Chat": {
DownloadSource.DEFAULT: "CohereForAI/aya-23-8B",
},
"Aya-23-35B-Chat": {
DownloadSource.DEFAULT: "CohereForAI/aya-23-35B",
},
},
template="cohere",
)
register_model_group(
models={
"Baichuan-7B-Base": {
DownloadSource.DEFAULT: "baichuan-inc/Baichuan-7B",
DownloadSource.MODELSCOPE: "baichuan-inc/baichuan-7B",
},
"Baichuan-13B-Base": {
DownloadSource.DEFAULT: "baichuan-inc/Baichuan-13B-Base",
DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan-13B-Base",
},
"Baichuan-13B-Chat": {
DownloadSource.DEFAULT: "baichuan-inc/Baichuan-13B-Chat",
DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan-13B-Chat",
},
},
template="baichuan",
)
register_model_group(
models={
"Baichuan2-7B-Base": {
DownloadSource.DEFAULT: "baichuan-inc/Baichuan2-7B-Base",
DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-7B-Base",
},
"Baichuan2-13B-Base": {
DownloadSource.DEFAULT: "baichuan-inc/Baichuan2-13B-Base",
DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-13B-Base",
},
"Baichuan2-7B-Chat": {
DownloadSource.DEFAULT: "baichuan-inc/Baichuan2-7B-Chat",
DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-7B-Chat",
},
"Baichuan2-13B-Chat": {
DownloadSource.DEFAULT: "baichuan-inc/Baichuan2-13B-Chat",
DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-13B-Chat",
},
},
template="baichuan2",
)
register_model_group(
models={
"BLOOM-560M": {
DownloadSource.DEFAULT: "bigscience/bloom-560m",
DownloadSource.MODELSCOPE: "AI-ModelScope/bloom-560m",
},
"BLOOM-3B": {
DownloadSource.DEFAULT: "bigscience/bloom-3b",
DownloadSource.MODELSCOPE: "AI-ModelScope/bloom-3b",
},
"BLOOM-7B1": {
DownloadSource.DEFAULT: "bigscience/bloom-7b1",
DownloadSource.MODELSCOPE: "AI-ModelScope/bloom-7b1",
},
},
)
register_model_group(
models={
"BLOOMZ-560M": {
DownloadSource.DEFAULT: "bigscience/bloomz-560m",
DownloadSource.MODELSCOPE: "AI-ModelScope/bloomz-560m",
},
"BLOOMZ-3B": {
DownloadSource.DEFAULT: "bigscience/bloomz-3b",
DownloadSource.MODELSCOPE: "AI-ModelScope/bloomz-3b",
},
"BLOOMZ-7B1-mt": {
DownloadSource.DEFAULT: "bigscience/bloomz-7b1-mt",
DownloadSource.MODELSCOPE: "AI-ModelScope/bloomz-7b1-mt",
},
},
)
register_model_group(
models={
"BlueLM-7B-Base": {
DownloadSource.DEFAULT: "vivo-ai/BlueLM-7B-Base",
DownloadSource.MODELSCOPE: "vivo-ai/BlueLM-7B-Base",
},
"BlueLM-7B-Chat": {
DownloadSource.DEFAULT: "vivo-ai/BlueLM-7B-Chat",
DownloadSource.MODELSCOPE: "vivo-ai/BlueLM-7B-Chat",
},
},
template="bluelm",
)
register_model_group(
models={
"Breeze-7B": {
DownloadSource.DEFAULT: "MediaTek-Research/Breeze-7B-Base-v1_0",
},
"Breeze-7B-Chat": {
DownloadSource.DEFAULT: "MediaTek-Research/Breeze-7B-Instruct-v1_0",
},
},
template="breeze",
)
register_model_group(
models={
"ChatGLM2-6B-Chat": {
DownloadSource.DEFAULT: "THUDM/chatglm2-6b",
DownloadSource.MODELSCOPE: "ZhipuAI/chatglm2-6b",
}
},
template="chatglm2",
)
register_model_group(
models={
"ChatGLM3-6B-Base": {
DownloadSource.DEFAULT: "THUDM/chatglm3-6b-base",
DownloadSource.MODELSCOPE: "ZhipuAI/chatglm3-6b-base",
},
"ChatGLM3-6B-Chat": {
DownloadSource.DEFAULT: "THUDM/chatglm3-6b",
DownloadSource.MODELSCOPE: "ZhipuAI/chatglm3-6b",
},
},
template="chatglm3",
)
register_model_group(
models={
"ChineseLLaMA2-1.3B": {
DownloadSource.DEFAULT: "hfl/chinese-llama-2-1.3b",
DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-llama-2-1.3b",
},
"ChineseLLaMA2-7B": {
DownloadSource.DEFAULT: "hfl/chinese-llama-2-7b",
DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-llama-2-7b",
},
"ChineseLLaMA2-13B": {
DownloadSource.DEFAULT: "hfl/chinese-llama-2-13b",
DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-llama-2-13b",
},
"ChineseLLaMA2-1.3B-Chat": {
DownloadSource.DEFAULT: "hfl/chinese-alpaca-2-1.3b",
DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-alpaca-2-1.3b",
},
"ChineseLLaMA2-7B-Chat": {
DownloadSource.DEFAULT: "hfl/chinese-alpaca-2-7b",
DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-alpaca-2-7b",
},
"ChineseLLaMA2-13B-Chat": {
DownloadSource.DEFAULT: "hfl/chinese-alpaca-2-13b",
DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-alpaca-2-13b",
},
},
template="llama2_zh",
)
register_model_group(
models={
"CodeGemma-7B": {
DownloadSource.DEFAULT: "google/codegemma-7b",
},
"CodeGemma-7B-Chat": {
DownloadSource.DEFAULT: "google/codegemma-7b-it",
DownloadSource.MODELSCOPE: "AI-ModelScope/codegemma-7b-it",
},
"CodeGemma-1.1-2B": {
DownloadSource.DEFAULT: "google/codegemma-1.1-2b",
},
"CodeGemma-1.1-7B-Chat": {
DownloadSource.DEFAULT: "google/codegemma-1.1-7b-it",
},
},
template="gemma",
)
register_model_group(
models={
"Codestral-22B-v0.1-Chat": {
DownloadSource.DEFAULT: "mistralai/Codestral-22B-v0.1",
},
},
template="mistral",
)
register_model_group(
models={
"CommandR-35B-Chat": {
DownloadSource.DEFAULT: "CohereForAI/c4ai-command-r-v01",
DownloadSource.MODELSCOPE: "AI-ModelScope/c4ai-command-r-v01",
},
"CommandR-Plus-104B-Chat": {
DownloadSource.DEFAULT: "CohereForAI/c4ai-command-r-plus",
DownloadSource.MODELSCOPE: "AI-ModelScope/c4ai-command-r-plus",
},
"CommandR-35B-4bit-Chat": {
DownloadSource.DEFAULT: "CohereForAI/c4ai-command-r-v01-4bit",
DownloadSource.MODELSCOPE: "mirror013/c4ai-command-r-v01-4bit",
},
"CommandR-Plus-104B-4bit-Chat": {
DownloadSource.DEFAULT: "CohereForAI/c4ai-command-r-plus-4bit",
},
},
template="cohere",
)
register_model_group(
models={
"DBRX-132B-Base": {
DownloadSource.DEFAULT: "databricks/dbrx-base",
DownloadSource.MODELSCOPE: "AI-ModelScope/dbrx-base",
},
"DBRX-132B-Chat": {
DownloadSource.DEFAULT: "databricks/dbrx-instruct",
DownloadSource.MODELSCOPE: "AI-ModelScope/dbrx-instruct",
},
},
template="dbrx",
)
register_model_group(
models={
"DeepSeek-LLM-7B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-llm-7b-base",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-llm-7b-base",
},
"DeepSeek-LLM-67B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-llm-67b-base",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-llm-67b-base",
},
"DeepSeek-LLM-7B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-llm-7b-chat",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-llm-7b-chat",
},
"DeepSeek-LLM-67B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-llm-67b-chat",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-llm-67b-chat",
},
"DeepSeek-Math-7B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-math-7b-base",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-math-7b-base",
},
"DeepSeek-Math-7B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-math-7b-instruct",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-math-7b-instruct",
},
"DeepSeek-MoE-16B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-moe-16b-base",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-moe-16b-base",
},
"DeepSeek-MoE-16B-v2-Base": {
DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Lite",
DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Lite",
},
"DeepSeek-MoE-236B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2",
DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2",
},
"DeepSeek-MoE-16B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-moe-16b-chat",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-moe-16b-chat",
},
"DeepSeek-MoE-16B-v2-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Lite-Chat",
DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Lite-Chat",
},
"DeepSeek-MoE-236B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Chat",
DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Chat",
},
"DeepSeek-MoE-Coder-16B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Lite-Base",
},
"DeepSeek-MoE-Coder-236B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Base",
},
"DeepSeek-MoE-Coder-16B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
},
"DeepSeek-MoE-Coder-236B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Instruct",
},
},
template="deepseek",
)
register_model_group(
models={
"DeepSeekCoder-6.7B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-6.7b-base",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-6.7b-base",
},
"DeepSeekCoder-7B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-7b-base-v1.5",
},
"DeepSeekCoder-33B-Base": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-33b-base",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-33b-base",
},
"DeepSeekCoder-6.7B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-6.7b-instruct",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-6.7b-instruct",
},
"DeepSeekCoder-7B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-7b-instruct-v1.5",
},
"DeepSeekCoder-33B-Chat": {
DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-33b-instruct",
DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-33b-instruct",
},
},
template="deepseekcoder",
)
register_model_group(
models={
"Falcon-7B": {
DownloadSource.DEFAULT: "tiiuae/falcon-7b",
DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-7b",
},
"Falcon-11B": {
DownloadSource.DEFAULT: "tiiuae/falcon-11B",
},
"Falcon-40B": {
DownloadSource.DEFAULT: "tiiuae/falcon-40b",
DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-40b",
},
"Falcon-180B": {
DownloadSource.DEFAULT: "tiiuae/falcon-180b",
DownloadSource.MODELSCOPE: "modelscope/falcon-180B",
},
"Falcon-7B-Chat": {
DownloadSource.DEFAULT: "tiiuae/falcon-7b-instruct",
DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-7b-instruct",
},
"Falcon-40B-Chat": {
DownloadSource.DEFAULT: "tiiuae/falcon-40b-instruct",
DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-40b-instruct",
},
"Falcon-180B-Chat": {
DownloadSource.DEFAULT: "tiiuae/falcon-180b-chat",
DownloadSource.MODELSCOPE: "modelscope/falcon-180B-chat",
},
},
template="falcon",
)
register_model_group(
models={
"Gemma-2B": {
DownloadSource.DEFAULT: "google/gemma-2b",
DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-2b",
},
"Gemma-7B": {
DownloadSource.DEFAULT: "google/gemma-7b",
DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-2b-it",
},
"Gemma-2B-Chat": {
DownloadSource.DEFAULT: "google/gemma-2b-it",
DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-7b",
},
"Gemma-7B-Chat": {
DownloadSource.DEFAULT: "google/gemma-7b-it",
DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-7b-it",
},
"Gemma-1.1-2B-Chat": {
DownloadSource.DEFAULT: "google/gemma-1.1-2b-it",
},
"Gemma-1.1-7B-Chat": {
DownloadSource.DEFAULT: "google/gemma-1.1-7b-it",
},
},
template="gemma",
)
register_model_group(
models={
"GLM-4-9B": {
DownloadSource.DEFAULT: "THUDM/glm-4-9b",
DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b",
},
"GLM-4-9B-Chat": {
DownloadSource.DEFAULT: "THUDM/glm-4-9b-chat",
DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b-chat",
},
"GLM-4-9B-1M-Chat": {
DownloadSource.DEFAULT: "THUDM/glm-4-9b-chat-1m",
DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b-chat-1m",
},
},
template="glm4",
)
register_model_group(
models={
"InternLM-7B": {
DownloadSource.DEFAULT: "internlm/internlm-7b",
DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm-7b",
},
"InternLM-20B": {
DownloadSource.DEFAULT: "internlm/internlm-20b",
DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm-20b",
},
"InternLM-7B-Chat": {
DownloadSource.DEFAULT: "internlm/internlm-chat-7b",
DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm-chat-7b",
},
"InternLM-20B-Chat": {
DownloadSource.DEFAULT: "internlm/internlm-chat-20b",
DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm-chat-20b",
},
},
template="intern",
)
register_model_group(
models={
"InternLM2-7B": {
DownloadSource.DEFAULT: "internlm/internlm2-7b",
DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm2-7b",
},
"InternLM2-20B": {
DownloadSource.DEFAULT: "internlm/internlm2-20b",
DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm2-20b",
},
"InternLM2-7B-Chat": {
DownloadSource.DEFAULT: "internlm/internlm2-chat-7b",
DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm2-chat-7b",
},
"InternLM2-20B-Chat": {
DownloadSource.DEFAULT: "internlm/internlm2-chat-20b",
DownloadSource.MODELSCOPE: "Shanghai_AI_Laboratory/internlm2-chat-20b",
},
},
template="intern2",
)
register_model_group(
models={
"Jamba-v0.1": {
DownloadSource.DEFAULT: "ai21labs/Jamba-v0.1",
DownloadSource.MODELSCOPE: "AI-ModelScope/Jamba-v0.1",
}
},
)
register_model_group(
models={
"LingoWhale-8B": {
DownloadSource.DEFAULT: "deeplang-ai/LingoWhale-8B",
DownloadSource.MODELSCOPE: "DeepLang/LingoWhale-8B",
}
},
)
register_model_group(
models={
"LLaMA-7B": {
DownloadSource.DEFAULT: "huggyllama/llama-7b",
DownloadSource.MODELSCOPE: "skyline2006/llama-7b",
},
"LLaMA-13B": {
DownloadSource.DEFAULT: "huggyllama/llama-13b",
DownloadSource.MODELSCOPE: "skyline2006/llama-13b",
},
"LLaMA-30B": {
DownloadSource.DEFAULT: "huggyllama/llama-30b",
DownloadSource.MODELSCOPE: "skyline2006/llama-30b",
},
"LLaMA-65B": {
DownloadSource.DEFAULT: "huggyllama/llama-65b",
DownloadSource.MODELSCOPE: "skyline2006/llama-65b",
},
}
)
register_model_group(
models={
"LLaMA2-7B": {
DownloadSource.DEFAULT: "meta-llama/Llama-2-7b-hf",
DownloadSource.MODELSCOPE: "modelscope/Llama-2-7b-ms",
},
"LLaMA2-13B": {
DownloadSource.DEFAULT: "meta-llama/Llama-2-13b-hf",
DownloadSource.MODELSCOPE: "modelscope/Llama-2-13b-ms",
},
"LLaMA2-70B": {
DownloadSource.DEFAULT: "meta-llama/Llama-2-70b-hf",
DownloadSource.MODELSCOPE: "modelscope/Llama-2-70b-ms",
},
"LLaMA2-7B-Chat": {
DownloadSource.DEFAULT: "meta-llama/Llama-2-7b-chat-hf",
DownloadSource.MODELSCOPE: "modelscope/Llama-2-7b-chat-ms",
},
"LLaMA2-13B-Chat": {
DownloadSource.DEFAULT: "meta-llama/Llama-2-13b-chat-hf",
DownloadSource.MODELSCOPE: "modelscope/Llama-2-13b-chat-ms",
},
"LLaMA2-70B-Chat": {
DownloadSource.DEFAULT: "meta-llama/Llama-2-70b-chat-hf",
DownloadSource.MODELSCOPE: "modelscope/Llama-2-70b-chat-ms",
},
},
template="llama2",
)
register_model_group(
models={
"LLaMA3-8B": {
DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-8B",
DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-8B",
},
"LLaMA3-70B": {
DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-70B",
DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-70B",
},
"LLaMA3-8B-Chat": {
DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-8B-Instruct",
DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-8B-Instruct",
},
"LLaMA3-70B-Chat": {
DownloadSource.DEFAULT: "meta-llama/Meta-Llama-3-70B-Instruct",
DownloadSource.MODELSCOPE: "LLM-Research/Meta-Llama-3-70B-Instruct",
},
"LLaMA3-8B-Chinese-Chat": {
DownloadSource.DEFAULT: "shenzhi-wang/Llama3-8B-Chinese-Chat",
DownloadSource.MODELSCOPE: "LLM-Research/Llama3-8B-Chinese-Chat",
},
"LLaMA3-70B-Chinese-Chat": {
DownloadSource.DEFAULT: "shenzhi-wang/Llama3-70B-Chinese-Chat",
},
},
template="llama3",
)
register_model_group(
models={
"LLaVA1.5-7B-Chat": {
DownloadSource.DEFAULT: "llava-hf/llava-1.5-7b-hf",
},
"LLaVA1.5-13B-Chat": {
DownloadSource.DEFAULT: "llava-hf/llava-1.5-13b-hf",
},
},
template="vicuna",
vision=True,
)
register_model_group(
models={
"MiniCPM-2B-SFT-Chat": {
DownloadSource.DEFAULT: "openbmb/MiniCPM-2B-sft-bf16",
DownloadSource.MODELSCOPE: "OpenBMB/miniCPM-bf16",
},
"MiniCPM-2B-DPO-Chat": {
DownloadSource.DEFAULT: "openbmb/MiniCPM-2B-dpo-bf16",
DownloadSource.MODELSCOPE: "OpenBMB/MiniCPM-2B-dpo-bf16",
},
},
template="cpm",
)
register_model_group(
models={
"Mistral-7B-v0.1": {
DownloadSource.DEFAULT: "mistralai/Mistral-7B-v0.1",
DownloadSource.MODELSCOPE: "AI-ModelScope/Mistral-7B-v0.1",
},
"Mistral-7B-v0.1-Chat": {
DownloadSource.DEFAULT: "mistralai/Mistral-7B-Instruct-v0.1",
DownloadSource.MODELSCOPE: "AI-ModelScope/Mistral-7B-Instruct-v0.1",
},
"Mistral-7B-v0.2": {
DownloadSource.DEFAULT: "alpindale/Mistral-7B-v0.2-hf",
DownloadSource.MODELSCOPE: "AI-ModelScope/Mistral-7B-v0.2-hf",
},
"Mistral-7B-v0.2-Chat": {
DownloadSource.DEFAULT: "mistralai/Mistral-7B-Instruct-v0.2",
DownloadSource.MODELSCOPE: "AI-ModelScope/Mistral-7B-Instruct-v0.2",
},
"Mistral-7B-v0.3": {
DownloadSource.DEFAULT: "mistralai/Mistral-7B-v0.3",
},
"Mistral-7B-v0.3-Chat": {
DownloadSource.DEFAULT: "mistralai/Mistral-7B-Instruct-v0.3",
},
},
template="mistral",
)
register_model_group(
models={
"Mixtral-8x7B-v0.1": {
DownloadSource.DEFAULT: "mistralai/Mixtral-8x7B-v0.1",
DownloadSource.MODELSCOPE: "AI-ModelScope/Mixtral-8x7B-v0.1",
},
"Mixtral-8x7B-v0.1-Chat": {
DownloadSource.DEFAULT: "mistralai/Mixtral-8x7B-Instruct-v0.1",
DownloadSource.MODELSCOPE: "AI-ModelScope/Mixtral-8x7B-Instruct-v0.1",
},
"Mixtral-8x22B-v0.1": {
DownloadSource.DEFAULT: "mistralai/Mixtral-8x22B-v0.1",
DownloadSource.MODELSCOPE: "AI-ModelScope/Mixtral-8x22B-v0.1",
},
"Mixtral-8x22B-v0.1-Chat": {
DownloadSource.DEFAULT: "mistralai/Mixtral-8x22B-Instruct-v0.1",
DownloadSource.MODELSCOPE: "AI-ModelScope/Mixtral-8x22B-Instruct-v0.1",
},
},
template="mistral",
)
register_model_group(
models={
"OLMo-1B": {
DownloadSource.DEFAULT: "allenai/OLMo-1B-hf",
},
"OLMo-7B": {
DownloadSource.DEFAULT: "allenai/OLMo-7B-hf",
},
"OLMo-7B-Chat": {
DownloadSource.DEFAULT: "ssec-uw/OLMo-7B-Instruct-hf",
},
"OLMo-1.7-7B": {
DownloadSource.DEFAULT: "allenai/OLMo-1.7-7B-hf",
},
},
)
register_model_group(
models={
"OpenChat3.5-7B-Chat": {
DownloadSource.DEFAULT: "openchat/openchat-3.5-0106",
DownloadSource.MODELSCOPE: "xcwzxcwz/openchat-3.5-0106",
}
},
template="openchat",
)
register_model_group(
models={
"OpenChat3.6-8B-Chat": {
DownloadSource.DEFAULT: "openchat/openchat-3.6-8b-20240522",
}
},
template="openchat-3.6",
)
register_model_group(
models={
"Orion-14B-Base": {
DownloadSource.DEFAULT: "OrionStarAI/Orion-14B-Base",
DownloadSource.MODELSCOPE: "OrionStarAI/Orion-14B-Base",
},
"Orion-14B-Chat": {
DownloadSource.DEFAULT: "OrionStarAI/Orion-14B-Chat",
DownloadSource.MODELSCOPE: "OrionStarAI/Orion-14B-Chat",
},
"Orion-14B-Long-Chat": {
DownloadSource.DEFAULT: "OrionStarAI/Orion-14B-LongChat",
DownloadSource.MODELSCOPE: "OrionStarAI/Orion-14B-LongChat",
},
"Orion-14B-RAG-Chat": {
DownloadSource.DEFAULT: "OrionStarAI/Orion-14B-Chat-RAG",
DownloadSource.MODELSCOPE: "OrionStarAI/Orion-14B-Chat-RAG",
},
"Orion-14B-Plugin-Chat": {
DownloadSource.DEFAULT: "OrionStarAI/Orion-14B-Chat-Plugin",
DownloadSource.MODELSCOPE: "OrionStarAI/Orion-14B-Chat-Plugin",
},
},
template="orion",
)
register_model_group(
models={
"PaliGemma-3B-pt-224": {
DownloadSource.DEFAULT: "google/paligemma-3b-pt-224",
DownloadSource.MODELSCOPE: "AI-ModelScope/paligemma-3b-pt-224",
},
"PaliGemma-3B-pt-448": {
DownloadSource.DEFAULT: "google/paligemma-3b-pt-448",
DownloadSource.MODELSCOPE: "AI-ModelScope/paligemma-3b-pt-448",
},
"PaliGemma-3B-pt-896": {
DownloadSource.DEFAULT: "google/paligemma-3b-pt-896",
DownloadSource.MODELSCOPE: "AI-ModelScope/paligemma-3b-pt-896",
},
"PaliGemma-3B-mix-224": {
DownloadSource.DEFAULT: "google/paligemma-3b-mix-224",
DownloadSource.MODELSCOPE: "AI-ModelScope/paligemma-3b-mix-224",
},
"PaliGemma-3B-mix-448": {
DownloadSource.DEFAULT: "google/paligemma-3b-mix-448",
DownloadSource.MODELSCOPE: "AI-ModelScope/paligemma-3b-mix-448",
},
},
vision=True,
)
register_model_group(
models={
"Phi-1.5-1.3B": {
DownloadSource.DEFAULT: "microsoft/phi-1_5",
DownloadSource.MODELSCOPE: "allspace/PHI_1-5",
},
"Phi-2-2.7B": {
DownloadSource.DEFAULT: "microsoft/phi-2",
DownloadSource.MODELSCOPE: "AI-ModelScope/phi-2",
},
}
)
register_model_group(
models={
"Phi3-4B-4k-Chat": {
DownloadSource.DEFAULT: "microsoft/Phi-3-mini-4k-instruct",
DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-mini-4k-instruct",
},
"Phi3-4B-128k-Chat": {
DownloadSource.DEFAULT: "microsoft/Phi-3-mini-128k-instruct",
DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-mini-128k-instruct",
},
"Phi3-7B-8k-Chat": {
DownloadSource.DEFAULT: "microsoft/Phi-3-small-8k-instruct",
DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-small-8k-instruct",
},
"Phi3-7B-128k-Chat": {
DownloadSource.DEFAULT: "microsoft/Phi-3-small-128k-instruct",
DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-small-128k-instruct",
},
"Phi3-14B-8k-Chat": {
DownloadSource.DEFAULT: "microsoft/Phi-3-medium-4k-instruct",
DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-medium-4k-instruct",
},
"Phi3-14B-128k-Chat": {
DownloadSource.DEFAULT: "microsoft/Phi-3-medium-128k-instruct",
DownloadSource.MODELSCOPE: "LLM-Research/Phi-3-medium-128k-instruct",
},
},
template="phi",
)
register_model_group(
models={
"Qwen-1.8B": {
DownloadSource.DEFAULT: "Qwen/Qwen-1_8B",
DownloadSource.MODELSCOPE: "qwen/Qwen-1_8B",
},
"Qwen-7B": {
DownloadSource.DEFAULT: "Qwen/Qwen-7B",
DownloadSource.MODELSCOPE: "qwen/Qwen-7B",
},
"Qwen-14B": {
DownloadSource.DEFAULT: "Qwen/Qwen-14B",
DownloadSource.MODELSCOPE: "qwen/Qwen-14B",
},
"Qwen-72B": {
DownloadSource.DEFAULT: "Qwen/Qwen-72B",
DownloadSource.MODELSCOPE: "qwen/Qwen-72B",
},
"Qwen-1.8B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-1_8B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen-1_8B-Chat",
},
"Qwen-7B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-7B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen-7B-Chat",
},
"Qwen-14B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-14B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen-14B-Chat",
},
"Qwen-72B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-72B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen-72B-Chat",
},
"Qwen-1.8B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-1_8B-Chat-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen-1_8B-Chat-Int8",
},
"Qwen-1.8B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-1_8B-Chat-Int4",
DownloadSource.MODELSCOPE: "qwen/Qwen-1_8B-Chat-Int4",
},
"Qwen-7B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-7B-Chat-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen-7B-Chat-Int8",
},
"Qwen-7B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-7B-Chat-Int4",
DownloadSource.MODELSCOPE: "qwen/Qwen-7B-Chat-Int4",
},
"Qwen-14B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-14B-Chat-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen-14B-Chat-Int8",
},
"Qwen-14B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-14B-Chat-Int4",
DownloadSource.MODELSCOPE: "qwen/Qwen-14B-Chat-Int4",
},
"Qwen-72B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-72B-Chat-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen-72B-Chat-Int8",
},
"Qwen-72B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen-72B-Chat-Int4",
DownloadSource.MODELSCOPE: "qwen/Qwen-72B-Chat-Int4",
},
},
template="qwen",
)
register_model_group(
models={
"Qwen1.5-0.5B": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-0.5B",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-0.5B",
},
"Qwen1.5-1.8B": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-1.8B",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-1.8B",
},
"Qwen1.5-4B": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-4B",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-4B",
},
"Qwen1.5-7B": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-7B",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-7B",
},
"Qwen1.5-14B": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-14B",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-14B",
},
"Qwen1.5-32B": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-32B",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-32B",
},
"Qwen1.5-72B": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-72B",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-72B",
},
"Qwen1.5-110B": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-110B",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-110B",
},
"Qwen1.5-MoE-A2.7B": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-MoE-A2.7B",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-MoE-A2.7B",
},
"Qwen1.5-Code-7B": {
DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B",
DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B",
},
"Qwen1.5-0.5B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-0.5B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-0.5B-Chat",
},
"Qwen1.5-1.8B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-1.8B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-1.8B-Chat",
},
"Qwen1.5-4B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-4B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-4B-Chat",
},
"Qwen1.5-7B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-7B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-7B-Chat",
},
"Qwen1.5-14B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-14B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-14B-Chat",
},
"Qwen1.5-32B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-32B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-32B-Chat",
},
"Qwen1.5-72B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-72B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-72B-Chat",
},
"Qwen1.5-110B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-110B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-110B-Chat",
},
"Qwen1.5-MoE-A2.7B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-MoE-A2.7B-Chat",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-MoE-A2.7B-Chat",
},
"Qwen1.5-Code-7B-Chat": {
DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B-Chat",
DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B-Chat",
},
"Qwen1.5-0.5B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-0.5B-Chat-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-0.5B-Chat-GPTQ-Int8",
},
"Qwen1.5-0.5B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-0.5B-Chat-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-0.5B-Chat-AWQ",
},
"Qwen1.5-1.8B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-1.8B-Chat-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-1.8B-Chat-GPTQ-Int8",
},
"Qwen1.5-1.8B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-1.8B-Chat-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-1.8B-Chat-AWQ",
},
"Qwen1.5-4B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-4B-Chat-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-4B-Chat-GPTQ-Int8",
},
"Qwen1.5-4B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-4B-Chat-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-4B-Chat-AWQ",
},
"Qwen1.5-7B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-7B-Chat-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-7B-Chat-GPTQ-Int8",
},
"Qwen1.5-7B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-7B-Chat-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-7B-Chat-AWQ",
},
"Qwen1.5-14B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-14B-Chat-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-14B-Chat-GPTQ-Int8",
},
"Qwen1.5-14B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-14B-Chat-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-14B-Chat-AWQ",
},
"Qwen1.5-32B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-32B-Chat-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-32B-Chat-AWQ",
},
"Qwen1.5-72B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-72B-Chat-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-72B-Chat-GPTQ-Int8",
},
"Qwen1.5-72B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-72B-Chat-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-72B-Chat-AWQ",
},
"Qwen1.5-110B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-110B-Chat-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-110B-Chat-AWQ",
},
"Qwen1.5-MoE-A2.7B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen1.5-MoE-A2.7B-Chat-GPTQ-Int4",
DownloadSource.MODELSCOPE: "qwen/Qwen1.5-MoE-A2.7B-Chat-GPTQ-Int4",
},
"Qwen1.5-Code-7B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/CodeQwen1.5-7B-Chat-AWQ",
DownloadSource.MODELSCOPE: "qwen/CodeQwen1.5-7B-Chat-AWQ",
},
},
template="qwen",
)
register_model_group(
models={
"Qwen2-0.5B": {
DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B",
DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B",
},
"Qwen2-1.5B": {
DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B",
DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B",
},
"Qwen2-7B": {
DownloadSource.DEFAULT: "Qwen/Qwen2-7B",
DownloadSource.MODELSCOPE: "qwen/Qwen2-7B",
},
"Qwen2-72B": {
DownloadSource.DEFAULT: "Qwen/Qwen2-72B",
DownloadSource.MODELSCOPE: "qwen/Qwen2-72B",
},
"Qwen2-MoE-57B": {
DownloadSource.DEFAULT: "Qwen/Qwen2-57B-A14B",
DownloadSource.MODELSCOPE: "qwen/Qwen2-57B-A14B",
},
"Qwen2-0.5B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B-Instruct",
DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B-Instruct",
},
"Qwen2-1.5B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct",
DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct",
},
"Qwen2-7B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct",
DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct",
},
"Qwen2-72B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct",
DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct",
},
"Qwen2-MoE-57B-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-57B-A14B-Instruct",
DownloadSource.MODELSCOPE: "qwen/Qwen2-57B-A14B-Instruct",
},
"Qwen2-0.5B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B-Instruct-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B-Instruct-GPTQ-Int8",
},
"Qwen2-0.5B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-0.5B-Instruct-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen2-0.5B-Instruct-AWQ",
},
"Qwen2-1.5B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct-GPTQ-Int8",
},
"Qwen2-1.5B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-1.5B-Instruct-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen2-1.5B-Instruct-AWQ",
},
"Qwen2-7B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct-GPTQ-Int8",
},
"Qwen2-7B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-7B-Instruct-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen2-7B-Instruct-AWQ",
},
"Qwen2-72B-int8-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct-GPTQ-Int8",
DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct-GPTQ-Int8",
},
"Qwen2-72B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-72B-Instruct-AWQ",
DownloadSource.MODELSCOPE: "qwen/Qwen2-72B-Instruct-AWQ",
},
"Qwen2-MoE-57B-int4-Chat": {
DownloadSource.DEFAULT: "Qwen/Qwen2-57B-A14B-Instruct-GPTQ-Int4",
DownloadSource.MODELSCOPE: "qwen/Qwen2-57B-A14B-Instruct-GPTQ-Int4",
},
},
template="qwen",
)
register_model_group(
models={
"SOLAR-10.7B": {
DownloadSource.DEFAULT: "upstage/SOLAR-10.7B-v1.0",
},
"SOLAR-10.7B-Chat": {
DownloadSource.DEFAULT: "upstage/SOLAR-10.7B-Instruct-v1.0",
DownloadSource.MODELSCOPE: "AI-ModelScope/SOLAR-10.7B-Instruct-v1.0",
},
},
template="solar",
)
register_model_group(
models={
"Skywork-13B-Base": {
DownloadSource.DEFAULT: "Skywork/Skywork-13B-base",
DownloadSource.MODELSCOPE: "skywork/Skywork-13B-base",
}
}
)
register_model_group(
models={
"StarCoder2-3B": {
DownloadSource.DEFAULT: "bigcode/starcoder2-3b",
DownloadSource.MODELSCOPE: "AI-ModelScope/starcoder2-3b",
},
"StarCoder2-7B": {
DownloadSource.DEFAULT: "bigcode/starcoder2-7b",
DownloadSource.MODELSCOPE: "AI-ModelScope/starcoder2-7b",
},
"StarCoder2-15B": {
DownloadSource.DEFAULT: "bigcode/starcoder2-15b",
DownloadSource.MODELSCOPE: "AI-ModelScope/starcoder2-15b",
},
}
)
register_model_group(
models={
"TeleChat-7B-Chat": {
DownloadSource.DEFAULT: "Tele-AI/telechat-7B",
DownloadSource.MODELSCOPE: "TeleAI/telechat-7B",
},
"TeleChat-12B-Chat": {
DownloadSource.DEFAULT: "Tele-AI/TeleChat-12B",
DownloadSource.MODELSCOPE: "TeleAI/TeleChat-12B",
},
"TeleChat-12B-v2-Chat": {
DownloadSource.DEFAULT: "Tele-AI/TeleChat-12B-v2",
DownloadSource.MODELSCOPE: "TeleAI/TeleChat-12B-v2",
},
},
template="telechat",
)
register_model_group(
models={
"Vicuna1.5-7B-Chat": {
DownloadSource.DEFAULT: "lmsys/vicuna-7b-v1.5",
DownloadSource.MODELSCOPE: "Xorbits/vicuna-7b-v1.5",
},
"Vicuna1.5-13B-Chat": {
DownloadSource.DEFAULT: "lmsys/vicuna-13b-v1.5",
DownloadSource.MODELSCOPE: "Xorbits/vicuna-13b-v1.5",
},
},
template="vicuna",
)
register_model_group(
models={
"XuanYuan-6B": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-6B",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-6B",
},
"XuanYuan-70B": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-70B",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-70B",
},
"XuanYuan-2-70B": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan2-70B",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan2-70B",
},
"XuanYuan-6B-Chat": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-6B-Chat",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-6B-Chat",
},
"XuanYuan-70B-Chat": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-70B-Chat",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-70B-Chat",
},
"XuanYuan-2-70B-Chat": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan2-70B-Chat",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan2-70B-Chat",
},
"XuanYuan-6B-int8-Chat": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-6B-Chat-8bit",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-6B-Chat-8bit",
},
"XuanYuan-6B-int4-Chat": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-6B-Chat-4bit",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-6B-Chat-4bit",
},
"XuanYuan-70B-int8-Chat": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-70B-Chat-8bit",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-70B-Chat-8bit",
},
"XuanYuan-70B-int4-Chat": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan-70B-Chat-4bit",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan-70B-Chat-4bit",
},
"XuanYuan-2-70B-int8-Chat": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan2-70B-Chat-8bit",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan2-70B-Chat-8bit",
},
"XuanYuan-2-70B-int4-Chat": {
DownloadSource.DEFAULT: "Duxiaoman-DI/XuanYuan2-70B-Chat-4bit",
DownloadSource.MODELSCOPE: "Duxiaoman-DI/XuanYuan2-70B-Chat-4bit",
},
},
template="xuanyuan",
)
register_model_group(
models={
"XVERSE-7B": {
DownloadSource.DEFAULT: "xverse/XVERSE-7B",
DownloadSource.MODELSCOPE: "xverse/XVERSE-7B",
},
"XVERSE-13B": {
DownloadSource.DEFAULT: "xverse/XVERSE-13B",
DownloadSource.MODELSCOPE: "xverse/XVERSE-13B",
},
"XVERSE-65B": {
DownloadSource.DEFAULT: "xverse/XVERSE-65B",
DownloadSource.MODELSCOPE: "xverse/XVERSE-65B",
},
"XVERSE-65B-2": {
DownloadSource.DEFAULT: "xverse/XVERSE-65B-2",
DownloadSource.MODELSCOPE: "xverse/XVERSE-65B-2",
},
"XVERSE-7B-Chat": {
DownloadSource.DEFAULT: "xverse/XVERSE-7B-Chat",
DownloadSource.MODELSCOPE: "xverse/XVERSE-7B-Chat",
},
"XVERSE-13B-Chat": {
DownloadSource.DEFAULT: "xverse/XVERSE-13B-Chat",
DownloadSource.MODELSCOPE: "xverse/XVERSE-13B-Chat",
},
"XVERSE-65B-Chat": {
DownloadSource.DEFAULT: "xverse/XVERSE-65B-Chat",
DownloadSource.MODELSCOPE: "xverse/XVERSE-65B-Chat",
},
"XVERSE-MoE-A4.2B": {
DownloadSource.DEFAULT: "xverse/XVERSE-MoE-A4.2B",
DownloadSource.MODELSCOPE: "xverse/XVERSE-MoE-A4.2B",
},
"XVERSE-7B-int8-Chat": {
DownloadSource.DEFAULT: "xverse/XVERSE-7B-Chat-GPTQ-Int8",
DownloadSource.MODELSCOPE: "xverse/XVERSE-7B-Chat-GPTQ-Int8",
},
"XVERSE-7B-int4-Chat": {
DownloadSource.DEFAULT: "xverse/XVERSE-7B-Chat-GPTQ-Int4",
DownloadSource.MODELSCOPE: "xverse/XVERSE-7B-Chat-GPTQ-Int4",
},
"XVERSE-13B-int8-Chat": {
DownloadSource.DEFAULT: "xverse/XVERSE-13B-Chat-GPTQ-Int8",
DownloadSource.MODELSCOPE: "xverse/XVERSE-13B-Chat-GPTQ-Int8",
},
"XVERSE-13B-int4-Chat": {
DownloadSource.DEFAULT: "xverse/XVERSE-13B-Chat-GPTQ-Int4",
DownloadSource.MODELSCOPE: "xverse/XVERSE-13B-Chat-GPTQ-Int4",
},
"XVERSE-65B-int4-Chat": {
DownloadSource.DEFAULT: "xverse/XVERSE-65B-Chat-GPTQ-Int4",
DownloadSource.MODELSCOPE: "xverse/XVERSE-65B-Chat-GPTQ-Int4",
},
},
template="xverse",
)
register_model_group(
models={
"Yayi-7B": {
DownloadSource.DEFAULT: "wenge-research/yayi-7b-llama2",
DownloadSource.MODELSCOPE: "AI-ModelScope/yayi-7b-llama2",
},
"Yayi-13B": {
DownloadSource.DEFAULT: "wenge-research/yayi-13b-llama2",
DownloadSource.MODELSCOPE: "AI-ModelScope/yayi-13b-llama2",
},
},
template="yayi",
)
register_model_group(
models={
"Yi-6B": {
DownloadSource.DEFAULT: "01-ai/Yi-6B",
DownloadSource.MODELSCOPE: "01ai/Yi-6B",
},
"Yi-9B": {
DownloadSource.DEFAULT: "01-ai/Yi-9B",
DownloadSource.MODELSCOPE: "01ai/Yi-9B",
},
"Yi-34B": {
DownloadSource.DEFAULT: "01-ai/Yi-34B",
DownloadSource.MODELSCOPE: "01ai/Yi-34B",
},
"Yi-6B-Chat": {
DownloadSource.DEFAULT: "01-ai/Yi-6B-Chat",
DownloadSource.MODELSCOPE: "01ai/Yi-6B-Chat",
},
"Yi-34B-Chat": {
DownloadSource.DEFAULT: "01-ai/Yi-34B-Chat",
DownloadSource.MODELSCOPE: "01ai/Yi-34B-Chat",
},
"Yi-6B-int8-Chat": {
DownloadSource.DEFAULT: "01-ai/Yi-6B-Chat-8bits",
DownloadSource.MODELSCOPE: "01ai/Yi-6B-Chat-8bits",
},
"Yi-6B-int4-Chat": {
DownloadSource.DEFAULT: "01-ai/Yi-6B-Chat-4bits",
DownloadSource.MODELSCOPE: "01ai/Yi-6B-Chat-4bits",
},
"Yi-34B-int8-Chat": {
DownloadSource.DEFAULT: "01-ai/Yi-34B-Chat-8bits",
DownloadSource.MODELSCOPE: "01ai/Yi-34B-Chat-8bits",
},
"Yi-34B-int4-Chat": {
DownloadSource.DEFAULT: "01-ai/Yi-34B-Chat-4bits",
DownloadSource.MODELSCOPE: "01ai/Yi-34B-Chat-4bits",
},
"Yi-1.5-6B": {
DownloadSource.DEFAULT: "01-ai/Yi-1.5-6B",
DownloadSource.MODELSCOPE: "01ai/Yi-1.5-6B",
},
"Yi-1.5-9B": {
DownloadSource.DEFAULT: "01-ai/Yi-1.5-9B",
DownloadSource.MODELSCOPE: "01ai/Yi-1.5-9B",
},
"Yi-1.5-34B": {
DownloadSource.DEFAULT: "01-ai/Yi-1.5-34B",
DownloadSource.MODELSCOPE: "01ai/Yi-1.5-34B",
},
"Yi-1.5-6B-Chat": {
DownloadSource.DEFAULT: "01-ai/Yi-1.5-6B-Chat",
DownloadSource.MODELSCOPE: "01ai/Yi-1.5-6B-Chat",
},
"Yi-1.5-9B-Chat": {
DownloadSource.DEFAULT: "01-ai/Yi-1.5-9B-Chat",
DownloadSource.MODELSCOPE: "01ai/Yi-1.5-9B-Chat",
},
"Yi-1.5-34B-Chat": {
DownloadSource.DEFAULT: "01-ai/Yi-1.5-34B-Chat",
DownloadSource.MODELSCOPE: "01ai/Yi-1.5-34B-Chat",
},
},
template="yi",
)
register_model_group(
models={
"YiVL-6B-Chat": {
DownloadSource.DEFAULT: "BUAADreamer/Yi-VL-6B-hf",
},
"YiVL-34B-Chat": {
DownloadSource.DEFAULT: "BUAADreamer/Yi-VL-34B-hf",
},
},
template="yi_vl",
vision=True,
)
register_model_group(
models={
"Yuan2-2B-Chat": {
DownloadSource.DEFAULT: "IEITYuan/Yuan2-2B-hf",
DownloadSource.MODELSCOPE: "YuanLLM/Yuan2.0-2B-hf",
},
"Yuan2-51B-Chat": {
DownloadSource.DEFAULT: "IEITYuan/Yuan2-51B-hf",
DownloadSource.MODELSCOPE: "YuanLLM/Yuan2.0-51B-hf",
},
"Yuan2-102B-Chat": {
DownloadSource.DEFAULT: "IEITYuan/Yuan2-102B-hf",
DownloadSource.MODELSCOPE: "YuanLLM/Yuan2.0-102B-hf",
},
},
template="yuan",
)
register_model_group(
models={
"Zephyr-7B-Alpha-Chat": {
DownloadSource.DEFAULT: "HuggingFaceH4/zephyr-7b-alpha",
DownloadSource.MODELSCOPE: "AI-ModelScope/zephyr-7b-alpha",
},
"Zephyr-7B-Beta-Chat": {
DownloadSource.DEFAULT: "HuggingFaceH4/zephyr-7b-beta",
DownloadSource.MODELSCOPE: "modelscope/zephyr-7b-beta",
},
"Zephyr-141B-ORPO-Chat": {
DownloadSource.DEFAULT: "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
},
},
template="zephyr",
)
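# Illustrative note: each `register_model_group` call above maps a display name to hub
# repositories keyed by `DownloadSource`; the optional `template` argument records the chat
# template associated with the group, and `vision=True` marks multimodal models. A hypothetical
# registration (all names below are made up) would look like:
#
# register_model_group(
#     models={"Example-7B-Chat": {DownloadSource.DEFAULT: "example-org/example-7b-chat"}},
#     template="example",
# )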
| LLaMA-Factory/src/llamafactory/extras/constants.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/extras/constants.py",
"repo_id": "LLaMA-Factory",
"token_count": 28697
} | 8 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import List, Literal, Optional
@dataclass
class FreezeArguments:
r"""
Arguments pertaining to the freeze (partial-parameter) training.
"""
freeze_trainable_layers: int = field(
default=2,
metadata={
"help": (
"The number of trainable layers for freeze (partial-parameter) fine-tuning. "
"Positive numbers mean the last n layers are set as trainable, "
"negative numbers mean the first n layers are set as trainable."
)
},
)
freeze_trainable_modules: str = field(
default="all",
metadata={
"help": (
"Name(s) of trainable modules for freeze (partial-parameter) fine-tuning. "
"Use commas to separate multiple modules. "
"Use `all` to specify all the available modules."
)
},
)
freeze_extra_modules: Optional[str] = field(
default=None,
metadata={
"help": (
"Name(s) of modules apart from hidden layers to be set as trainable "
"for freeze (partial-parameter) fine-tuning. "
"Use commas to separate multiple modules."
)
},
)
@dataclass
class LoraArguments:
r"""
Arguments pertaining to the LoRA training.
"""
additional_target: Optional[str] = field(
default=None,
metadata={
"help": (
"Name(s) of modules apart from LoRA layers to be set as trainable "
"and saved in the final checkpoint. "
"Use commas to separate multiple modules."
)
},
)
lora_alpha: Optional[int] = field(
default=None,
metadata={"help": "The scale factor for LoRA fine-tuning (default: lora_rank * 2)."},
)
lora_dropout: float = field(
default=0.0,
metadata={"help": "Dropout rate for the LoRA fine-tuning."},
)
lora_rank: int = field(
default=8,
metadata={"help": "The intrinsic dimension for LoRA fine-tuning."},
)
lora_target: str = field(
default="all",
metadata={
"help": (
"Name(s) of target modules to apply LoRA. "
"Use commas to separate multiple modules. "
"Use `all` to specify all the linear modules."
)
},
)
loraplus_lr_ratio: Optional[float] = field(
default=None,
metadata={"help": "LoRA plus learning rate ratio (lr_B / lr_A)."},
)
loraplus_lr_embedding: float = field(
default=1e-6,
metadata={"help": "LoRA plus learning rate for lora embedding layers."},
)
use_rslora: bool = field(
default=False,
metadata={"help": "Whether or not to use the rank stabilization scaling factor for LoRA layer."},
)
use_dora: bool = field(
default=False,
metadata={"help": "Whether or not to use the weight-decomposed lora method (DoRA)."},
)
pissa_init: bool = field(
default=False,
metadata={"help": "Whether or not to initialize a PiSSA adapter."},
)
pissa_iter: int = field(
default=4,
metadata={"help": "The number of iteration steps performed by FSVD in PiSSA. Use -1 to disable it."},
)
pissa_convert: bool = field(
default=False,
metadata={"help": "Whether or not to convert the PiSSA adapter to a normal LoRA adapter."},
)
create_new_adapter: bool = field(
default=False,
metadata={"help": "Whether or not to create a new adapter with randomly initialized weight."},
)
@dataclass
class RLHFArguments:
r"""
Arguments pertaining to the PPO, DPO and KTO training.
"""
pref_beta: float = field(
default=0.1,
metadata={"help": "The beta parameter in the preference loss."},
)
pref_ftx: float = field(
default=0.0,
metadata={"help": "The supervised fine-tuning loss coefficient in DPO training."},
)
pref_loss: Literal["sigmoid", "hinge", "ipo", "kto_pair", "orpo", "simpo"] = field(
default="sigmoid",
metadata={"help": "The type of DPO loss to use."},
)
dpo_label_smoothing: float = field(
default=0.0,
metadata={"help": "The robust DPO label smoothing parameter in cDPO that should be between 0 and 0.5."},
)
kto_chosen_weight: float = field(
default=1.0,
metadata={"help": "The weight factor of the desirable losses in KTO training."},
)
kto_rejected_weight: float = field(
default=1.0,
metadata={"help": "The weight factor of the undesirable losses in KTO training."},
)
simpo_gamma: float = field(
default=0.5,
metadata={"help": "The target reward margin term in SimPO loss."},
)
ppo_buffer_size: int = field(
default=1,
metadata={"help": "The number of mini-batches to make experience buffer in a PPO optimization step."},
)
ppo_epochs: int = field(
default=4,
metadata={"help": "The number of epochs to perform in a PPO optimization step."},
)
ppo_score_norm: bool = field(
default=False,
metadata={"help": "Use score normalization in PPO training."},
)
ppo_target: float = field(
default=6.0,
metadata={"help": "Target KL value for adaptive KL control in PPO training."},
)
ppo_whiten_rewards: bool = field(
default=False,
metadata={"help": "Whiten the rewards before compute advantages in PPO training."},
)
ref_model: Optional[str] = field(
default=None,
metadata={"help": "Path to the reference model used for the PPO or DPO training."},
)
ref_model_adapters: Optional[str] = field(
default=None,
metadata={"help": "Path to the adapters of the reference model."},
)
ref_model_quantization_bit: Optional[int] = field(
default=None,
metadata={"help": "The number of bits to quantize the reference model."},
)
reward_model: Optional[str] = field(
default=None,
metadata={"help": "Path to the reward model used for the PPO training."},
)
reward_model_adapters: Optional[str] = field(
default=None,
metadata={"help": "Path to the adapters of the reward model."},
)
reward_model_quantization_bit: Optional[int] = field(
default=None,
metadata={"help": "The number of bits to quantize the reward model."},
)
reward_model_type: Literal["lora", "full", "api"] = field(
default="lora",
metadata={"help": "The type of the reward model in PPO training. Lora model only supports lora training."},
)
@dataclass
class GaloreArguments:
r"""
Arguments pertaining to the GaLore algorithm.
"""
use_galore: bool = field(
default=False,
metadata={"help": "Whether or not to use the gradient low-Rank projection (GaLore)."},
)
galore_target: str = field(
default="all",
metadata={
"help": (
"Name(s) of modules to apply GaLore. Use commas to separate multiple modules. "
"Use `all` to specify all the linear modules."
)
},
)
galore_rank: int = field(
default=16,
metadata={"help": "The rank of GaLore gradients."},
)
galore_update_interval: int = field(
default=200,
metadata={"help": "Number of steps to update the GaLore projection."},
)
galore_scale: float = field(
default=0.25,
metadata={"help": "GaLore scaling coefficient."},
)
galore_proj_type: Literal["std", "reverse_std", "right", "left", "full"] = field(
default="std",
metadata={"help": "Type of GaLore projection."},
)
galore_layerwise: bool = field(
default=False,
metadata={"help": "Whether or not to enable layer-wise update to further save memory."},
)
@dataclass
class BAdamArgument:
r"""
Arguments pertaining to the BAdam optimizer.
"""
use_badam: bool = field(
default=False,
metadata={"help": "Whether or not to use the BAdam optimizer."},
)
badam_mode: Literal["layer", "ratio"] = field(
default="layer",
metadata={"help": "Whether to use layer-wise or ratio-wise BAdam optimizer."},
)
badam_start_block: Optional[int] = field(
default=None,
metadata={"help": "The starting block index for layer-wise BAdam."},
)
badam_switch_mode: Optional[Literal["ascending", "descending", "random", "fixed"]] = field(
default="ascending",
metadata={"help": "the strategy of picking block to update for layer-wise BAdam."},
)
badam_switch_interval: Optional[int] = field(
default=50,
metadata={
"help": "Number of steps to update the block for layer-wise BAdam. Use -1 to disable the block update."
},
)
badam_update_ratio: float = field(
default=0.05,
metadata={"help": "The ratio of the update for ratio-wise BAdam."},
)
badam_mask_mode: Literal["adjacent", "scatter"] = field(
default="adjacent",
metadata={
"help": (
"The mode of the mask for BAdam optimizer. "
"`adjacent` means that the trainable parameters are adjacent to each other, "
"`scatter` means that trainable parameters are randomly choosed from the weight."
)
},
)
badam_verbose: int = field(
default=0,
metadata={
"help": (
"The verbosity level of BAdam optimizer. "
"0 for no print, 1 for print the block prefix, 2 for print trainable parameters."
)
},
)
@dataclass
class FinetuningArguments(FreezeArguments, LoraArguments, RLHFArguments, GaloreArguments, BAdamArgument):
r"""
Arguments pertaining to which techniques we are going to fine-tuning with.
"""
pure_bf16: bool = field(
default=False,
metadata={"help": "Whether or not to train model in purely bf16 precision (without AMP)."},
)
stage: Literal["pt", "sft", "rm", "ppo", "dpo", "kto"] = field(
default="sft",
metadata={"help": "Which stage will be performed in training."},
)
finetuning_type: Literal["lora", "freeze", "full"] = field(
default="lora",
metadata={"help": "Which fine-tuning method to use."},
)
use_llama_pro: bool = field(
default=False,
metadata={"help": "Whether or not to make only the parameters in the expanded blocks trainable."},
)
freeze_vision_tower: bool = field(
default=True,
metadata={"help": "Whether ot not to freeze vision tower in MLLM training."},
)
train_mm_proj_only: bool = field(
default=False,
metadata={"help": "Whether or not to train the multimodal projector for MLLM only."},
)
plot_loss: bool = field(
default=False,
metadata={"help": "Whether or not to save the training loss curves."},
)
def __post_init__(self):
def split_arg(arg):
if isinstance(arg, str):
return [item.strip() for item in arg.split(",")]
return arg
self.freeze_trainable_modules: List[str] = split_arg(self.freeze_trainable_modules)
self.freeze_extra_modules: Optional[List[str]] = split_arg(self.freeze_extra_modules)
self.lora_alpha: int = self.lora_alpha or self.lora_rank * 2
self.lora_target: List[str] = split_arg(self.lora_target)
self.additional_target: Optional[List[str]] = split_arg(self.additional_target)
self.galore_target: List[str] = split_arg(self.galore_target)
self.freeze_vision_tower = self.freeze_vision_tower or self.train_mm_proj_only
self.use_ref_model = self.stage == "dpo" and self.pref_loss not in ["orpo", "simpo"]
assert self.finetuning_type in ["lora", "freeze", "full"], "Invalid fine-tuning method."
assert self.ref_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
assert self.reward_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
if self.stage == "ppo" and self.reward_model is None:
raise ValueError("`reward_model` is necessary for PPO training.")
if self.stage == "ppo" and self.reward_model_type == "lora" and self.finetuning_type != "lora":
raise ValueError("`reward_model_type` cannot be lora for Freeze/Full PPO training.")
if self.stage == "dpo" and self.pref_loss != "sigmoid" and self.dpo_label_smoothing > 1e-6:
raise ValueError("`dpo_label_smoothing` is only valid for sigmoid loss function.")
if self.use_llama_pro and self.finetuning_type == "full":
raise ValueError("`use_llama_pro` is only valid for Freeze or LoRA training.")
if self.finetuning_type == "lora" and (self.use_galore or self.use_badam):
raise ValueError("Cannot use LoRA with GaLore or BAdam together.")
if self.use_galore and self.use_badam:
raise ValueError("Cannot use GaLore with BAdam together.")
if self.loraplus_lr_ratio is not None and self.finetuning_type != "lora":
raise ValueError("`loraplus_lr_ratio` is only valid for LoRA training.")
if self.pissa_convert and self.finetuning_type != "lora":
raise ValueError("`pissa_convert` is only valid for LoRA training.")
if self.pissa_convert and (self.stage in ["rm", "ppo", "kto"] or self.use_ref_model):
raise ValueError("Cannot use PiSSA for current training stage.")
if self.train_mm_proj_only and self.finetuning_type != "full":
raise ValueError("`train_mm_proj_only` is only valid for full training.")
| LLaMA-Factory/src/llamafactory/hparams/finetuning_args.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/hparams/finetuning_args.py",
"repo_id": "LLaMA-Factory",
"token_count": 6023
} | 9 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Sequence
import torch
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.utils.versions import require_version
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedModel
from ...hparams import ModelArguments
def _set_z3_leaf_modules(model: "PreTrainedModel", leaf_modules: Sequence["torch.nn.Module"]) -> None:
require_version("deepspeed>=0.13.0", "To fix: pip install deepspeed>=0.13.0")
from deepspeed.utils import set_z3_leaf_modules # type: ignore
set_z3_leaf_modules(model, leaf_modules)
def add_z3_leaf_module(model: "PreTrainedModel") -> None:
r"""
    Sets modules as leaf modules to skip partitioning in DeepSpeed ZeRO-3.
"""
if not is_deepspeed_zero3_enabled():
return
if getattr(model.config, "model_type", None) == "dbrx":
from transformers.models.dbrx.modeling_dbrx import DbrxFFN
_set_z3_leaf_modules(model, [DbrxFFN])
if getattr(model.config, "model_type", None) == "jamba":
from transformers.models.jamba.modeling_jamba import JambaSparseMoeBlock
_set_z3_leaf_modules(model, [JambaSparseMoeBlock])
if getattr(model.config, "model_type", None) == "jetmoe":
from transformers.models.jetmoe.modeling_jetmoe import JetMoeMoA, JetMoeMoE
_set_z3_leaf_modules(model, [JetMoeMoA, JetMoeMoE])
if getattr(model.config, "model_type", None) == "mixtral":
from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock
_set_z3_leaf_modules(model, [MixtralSparseMoeBlock])
if getattr(model.config, "model_type", None) == "qwen2moe":
from transformers.models.qwen2_moe.modeling_qwen2_moe import Qwen2MoeSparseMoeBlock
_set_z3_leaf_modules(model, [Qwen2MoeSparseMoeBlock])
def configure_moe(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None:
if model_args.moe_aux_loss_coef is not None:
if getattr(config, "model_type", None) in ["jamba", "mixtral", "qwen2_moe"]:
setattr(config, "router_aux_loss_coef", model_args.moe_aux_loss_coef)
elif getattr(config, "model_type", None) == "deepseek":
setattr(config, "aux_loss_alpha", model_args.moe_aux_loss_coef)
elif getattr(config, "model_type", None) == "jetmoe":
setattr(config, "aux_loss_coef", model_args.moe_aux_loss_coef)
if getattr(config, "model_type", None) in ["dbrx", "jamba", "jetmoe", "mixtral", "qwen2_moe"]:
setattr(config, "output_router_logits", is_trainable)
| LLaMA-Factory/src/llamafactory/model/model_utils/moe.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/model/model_utils/moe.py",
"repo_id": "LLaMA-Factory",
"token_count": 1216
} | 10 |
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's TRL library.
# https://github.com/huggingface/trl/blob/v0.8.0/examples/scripts/dpo.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List, Optional
from ...data import PairwiseDataCollatorWithPadding, get_dataset, split_dataset
from ...extras.constants import IGNORE_INDEX
from ...extras.ploting import plot_loss
from ...hparams import ModelArguments
from ...model import load_model, load_tokenizer
from ..trainer_utils import create_modelcard_and_push, create_ref_model
from .trainer import CustomDPOTrainer
if TYPE_CHECKING:
from transformers import Seq2SeqTrainingArguments, TrainerCallback
from ...hparams import DataArguments, FinetuningArguments
def run_dpo(
model_args: "ModelArguments",
data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
finetuning_args: "FinetuningArguments",
callbacks: Optional[List["TrainerCallback"]] = None,
):
tokenizer_module = load_tokenizer(model_args)
tokenizer = tokenizer_module["tokenizer"]
dataset = get_dataset(model_args, data_args, training_args, stage="rm", **tokenizer_module)
model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train)
data_collator = PairwiseDataCollatorWithPadding(
tokenizer=tokenizer,
pad_to_multiple_of=8,
label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id,
)
# Create reference model
if finetuning_args.use_ref_model:
if finetuning_args.ref_model is None and (not training_args.do_train): # use the model itself
ref_model = model
else:
ref_model = create_ref_model(model_args, finetuning_args)
else:
ref_model = None
# Update arguments
training_args.remove_unused_columns = False # important for pairwise dataset
# Initialize our Trainer
trainer = CustomDPOTrainer(
model=model,
ref_model=ref_model,
args=training_args,
finetuning_args=finetuning_args,
data_collator=data_collator,
callbacks=callbacks,
**tokenizer_module,
**split_dataset(dataset, data_args, training_args),
)
# Training
if training_args.do_train:
train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
trainer.save_model()
trainer.log_metrics("train", train_result.metrics)
trainer.save_metrics("train", train_result.metrics)
trainer.save_state()
if trainer.is_world_process_zero() and finetuning_args.plot_loss:
plot_loss(training_args.output_dir, keys=["loss", "eval_loss", "rewards/accuracies"])
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate(metric_key_prefix="eval")
if id(model) == id(ref_model): # unable to compute rewards if reference model is the model itself
remove_keys = [key for key in metrics.keys() if "rewards" in key]
for key in remove_keys:
metrics.pop(key)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Create model card
create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args)
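# Invocation sketch (illustrative): `run_dpo` is normally dispatched by the training entry point
# after the argument dataclasses have been parsed, roughly:
#     run_dpo(model_args, data_args, training_args, finetuning_args, callbacks=[])
# where each *_args object provides the fields referenced above.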
| LLaMA-Factory/src/llamafactory/train/dpo/workflow.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/train/dpo/workflow.py",
"repo_id": "LLaMA-Factory",
"token_count": 1428
} | 11 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, Generator, List, Union
from ...extras.constants import PEFT_METHODS
from ...extras.misc import torch_gc
from ...extras.packages import is_gradio_available
from ...train.tuner import export_model
from ..common import GPTQ_BITS, get_save_dir
from ..locales import ALERTS
if is_gradio_available():
import gradio as gr
if TYPE_CHECKING:
from gradio.components import Component
from ..engine import Engine
def can_quantize(checkpoint_path: Union[str, List[str]]) -> "gr.Dropdown":
if isinstance(checkpoint_path, list) and len(checkpoint_path) != 0:
return gr.Dropdown(value="none", interactive=False)
else:
return gr.Dropdown(interactive=True)
def save_model(
lang: str,
model_name: str,
model_path: str,
finetuning_type: str,
checkpoint_path: Union[str, List[str]],
template: str,
visual_inputs: bool,
export_size: int,
export_quantization_bit: int,
export_quantization_dataset: str,
export_device: str,
export_legacy_format: bool,
export_dir: str,
export_hub_model_id: str,
) -> Generator[str, None, None]:
error = ""
if not model_name:
error = ALERTS["err_no_model"][lang]
elif not model_path:
error = ALERTS["err_no_path"][lang]
elif not export_dir:
error = ALERTS["err_no_export_dir"][lang]
elif export_quantization_bit in GPTQ_BITS and not export_quantization_dataset:
error = ALERTS["err_no_dataset"][lang]
elif export_quantization_bit not in GPTQ_BITS and not checkpoint_path:
error = ALERTS["err_no_adapter"][lang]
elif export_quantization_bit in GPTQ_BITS and isinstance(checkpoint_path, list):
error = ALERTS["err_gptq_lora"][lang]
if error:
gr.Warning(error)
yield error
return
args = dict(
model_name_or_path=model_path,
finetuning_type=finetuning_type,
template=template,
visual_inputs=visual_inputs,
export_dir=export_dir,
export_hub_model_id=export_hub_model_id or None,
export_size=export_size,
export_quantization_bit=int(export_quantization_bit) if export_quantization_bit in GPTQ_BITS else None,
export_quantization_dataset=export_quantization_dataset,
export_device=export_device,
export_legacy_format=export_legacy_format,
)
if checkpoint_path:
if finetuning_type in PEFT_METHODS: # list
args["adapter_name_or_path"] = ",".join(
[get_save_dir(model_name, finetuning_type, adapter) for adapter in checkpoint_path]
)
else: # str
args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, checkpoint_path)
yield ALERTS["info_exporting"][lang]
export_model(args)
torch_gc()
yield ALERTS["info_exported"][lang]
def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
with gr.Row():
export_size = gr.Slider(minimum=1, maximum=100, value=1, step=1)
export_quantization_bit = gr.Dropdown(choices=["none"] + GPTQ_BITS, value="none")
export_quantization_dataset = gr.Textbox(value="data/c4_demo.json")
export_device = gr.Radio(choices=["cpu", "auto"], value="cpu")
export_legacy_format = gr.Checkbox()
with gr.Row():
export_dir = gr.Textbox()
export_hub_model_id = gr.Textbox()
checkpoint_path: gr.Dropdown = engine.manager.get_elem_by_id("top.checkpoint_path")
checkpoint_path.change(can_quantize, [checkpoint_path], [export_quantization_bit], queue=False)
export_btn = gr.Button()
info_box = gr.Textbox(show_label=False, interactive=False)
export_btn.click(
save_model,
[
engine.manager.get_elem_by_id("top.lang"),
engine.manager.get_elem_by_id("top.model_name"),
engine.manager.get_elem_by_id("top.model_path"),
engine.manager.get_elem_by_id("top.finetuning_type"),
engine.manager.get_elem_by_id("top.checkpoint_path"),
engine.manager.get_elem_by_id("top.template"),
engine.manager.get_elem_by_id("top.visual_inputs"),
export_size,
export_quantization_bit,
export_quantization_dataset,
export_device,
export_legacy_format,
export_dir,
export_hub_model_id,
],
[info_box],
)
return dict(
export_size=export_size,
export_quantization_bit=export_quantization_bit,
export_quantization_dataset=export_quantization_dataset,
export_device=export_device,
export_legacy_format=export_legacy_format,
export_dir=export_dir,
export_hub_model_id=export_hub_model_id,
export_btn=export_btn,
info_box=info_box,
)
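# Data-flow summary: `create_export_tab` builds the Gradio widgets, re-evaluates `can_quantize`
# whenever the checkpoint selection changes, and streams the status strings yielded by
# `save_model` into `info_box` once the export button is clicked.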
| LLaMA-Factory/src/llamafactory/webui/components/export.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/webui/components/export.py",
"repo_id": "LLaMA-Factory",
"token_count": 2281
} | 12 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llamafactory.eval.template import get_eval_template
def test_eval_template_en():
support_set = [
{
"question": "Fewshot question",
"A": "Fewshot1",
"B": "Fewshot2",
"C": "Fewshot3",
"D": "Fewshot4",
"answer": "B",
}
]
example = {
"question": "Target question",
"A": "Target1",
"B": "Target2",
"C": "Target3",
"D": "Target4",
"answer": "C",
}
template = get_eval_template(name="en")
messages = template.format_example(example, support_set=support_set, subject_name="SubName")
assert messages == [
{
"role": "user",
"content": (
"The following are multiple choice questions (with answers) about SubName.\n\n"
"Fewshot question\nA. Fewshot1\nB. Fewshot2\nC. Fewshot3\nD. Fewshot4\nAnswer:"
),
},
{"role": "assistant", "content": "B"},
{
"role": "user",
"content": "Target question\nA. Target1\nB. Target2\nC. Target3\nD. Target4\nAnswer:",
},
{"role": "assistant", "content": "C"},
]
def test_eval_template_zh():
support_set = [
{
"question": "示例问题",
"A": "示例答案1",
"B": "示例答案2",
"C": "示例答案3",
"D": "示例答案4",
"answer": "B",
}
]
example = {
"question": "目标问题",
"A": "目标答案1",
"B": "目标答案2",
"C": "目标答案3",
"D": "目标答案4",
"answer": "C",
}
template = get_eval_template(name="zh")
messages = template.format_example(example, support_set=support_set, subject_name="主题")
assert messages == [
{
"role": "user",
"content": (
"以下是中国关于主题考试的单项选择题,请选出其中的正确答案。\n\n"
"示例问题\nA. 示例答案1\nB. 示例答案2\nC. 示例答案3\nD. 示例答案4\n答案:"
),
},
{"role": "assistant", "content": "B"},
{
"role": "user",
"content": "目标问题\nA. 目标答案1\nB. 目标答案2\nC. 目标答案3\nD. 目标答案4\n答案:",
},
{"role": "assistant", "content": "C"},
]
| LLaMA-Factory/tests/eval/test_eval_template.py/0 | {
"file_path": "LLaMA-Factory/tests/eval/test_eval_template.py",
"repo_id": "LLaMA-Factory",
"token_count": 1596
} | 13 |
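The two tests above fully pin down the prompt layout: the subject header is attached to the first few-shot question, each choice is rendered as "X. text" on its own line, and the answer suffix closes every user turn. A minimal sketch of a format_example that would satisfy these assertions follows; it is an assumption written for illustration, not the actual implementation in llamafactory.eval.template.

# Rough, illustrative re-implementation (NOT the actual llamafactory code) of an
# eval template consistent with the assertions in the two tests above.
from dataclasses import dataclass
from typing import Dict, List, Sequence, Tuple

CHOICES = ["A", "B", "C", "D"]


@dataclass
class SketchEvalTemplate:
    system: str  # e.g. "The following are multiple choice questions (with answers) about {subject}.\n\n"
    answer: str  # e.g. "\nAnswer:" for the en template, "\n答案:" for the zh template

    def _parse_example(self, example: Dict[str, str]) -> Tuple[str, str]:
        # Render "question\nA. ...\nB. ...\nC. ...\nD. ...<answer suffix>" plus the gold label.
        candidates = ["\n{}. {}".format(ch, example[ch]) for ch in CHOICES if ch in example]
        return "".join([example["question"]] + candidates + [self.answer]), example["answer"]

    def format_example(
        self, target_data: Dict[str, str], support_set: Sequence[Dict[str, str]], subject_name: str
    ) -> List[Dict[str, str]]:
        messages = []
        for k, shot in enumerate(support_set):
            prompt, response = self._parse_example(shot)
            if k == 0:
                # The subject header is prepended only to the very first user turn.
                prompt = self.system.format(subject=subject_name) + prompt
            messages.append({"role": "user", "content": prompt})
            messages.append({"role": "assistant", "content": response})
        prompt, response = self._parse_example(target_data)
        if not support_set:
            prompt = self.system.format(subject=subject_name) + prompt
        messages.append({"role": "user", "content": prompt})
        messages.append({"role": "assistant", "content": response})
        return messages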
language: cpp
cache: ccache
sudo: required
dist: trusty
services:
- docker
os:
- linux
env:
- JOB=PRE_COMMIT
addons:
apt:
packages:
- git
- python
- python-pip
- python2.7-dev
ssh_known_hosts: 13.229.163.131
before_install:
- sudo pip install -U virtualenv pre-commit pip -i https://pypi.tuna.tsinghua.edu.cn/simple
- docker pull paddlepaddle/paddle:latest
- git pull https://github.com/PaddlePaddle/PaddleDetection develop
script:
- exit_code=0
- .travis/precommit.sh || exit_code=$(( exit_code | $? ))
# - docker run -i --rm -v "$PWD:/py_unittest" paddlepaddle/paddle:latest /bin/bash -c
# 'cd /py_unittest; sh .travis/unittest.sh' || exit_code=$(( exit_code | $? ))
- if [ $exit_code -eq 0 ]; then true; else exit 1; fi;
notifications:
email:
on_success: change
on_failure: always
| PaddleDetection/.travis.yml/0 | {
"file_path": "PaddleDetection/.travis.yml",
"repo_id": "PaddleDetection",
"token_count": 350
} | 14 |
#!/usr/bin/env bash
set -xe
# Usage: CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh ${run_mode} ${batch_size} ${fp_item} ${max_epoch} ${model_item}
python="python3.7"
# Parameter description
function _set_params(){
run_mode=${1:-"sp"} # sp|mp
batch_size=${2:-"2"}
fp_item=${3:-"fp32"} # fp32|fp16
max_epoch=${4:-"1"}
model_item=${5:-"model_item"}
run_log_path=${TRAIN_LOG_DIR:-$(pwd)}
    # Extra parameters required by the log-parsing step
    base_batch_size=${batch_size}
    mission_name="目标检测"   # task name used in benchmark reports ("Object Detection")
    direction_id="0"
    ips_unit="images/s"
    skip_steps=10             # skip the first few steps when parsing logs; some models warm up slowly (required)
    keyword="ips:"            # keyword marking the log lines that contain throughput data (required)
index="1"
model_name=${model_item}_bs${batch_size}_${fp_item}
device=${CUDA_VISIBLE_DEVICES//,/ }
arr=(${device})
num_gpu_devices=${#arr[*]}
log_file=${run_log_path}/${model_item}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
}
function _train(){
echo "Train on ${num_gpu_devices} GPUs"
echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"
# set runtime params
set_optimizer_lr_sp=" "
set_optimizer_lr_mp=" "
# parse model_item
case ${model_item} in
faster_rcnn) model_yml="benchmark/configs/faster_rcnn_r50_fpn_1x_coco.yml"
set_optimizer_lr_sp="LearningRate.base_lr=0.001" ;;
fcos) model_yml="configs/fcos/fcos_r50_fpn_1x_coco.yml"
set_optimizer_lr_sp="LearningRate.base_lr=0.001" ;;
deformable_detr) model_yml="configs/deformable_detr/deformable_detr_r50_1x_coco.yml" ;;
gfl) model_yml="configs/gfl/gfl_r50_fpn_1x_coco.yml"
set_optimizer_lr_sp="LearningRate.base_lr=0.001" ;;
hrnet) model_yml="configs/keypoint/hrnet/hrnet_w32_256x192.yml" ;;
higherhrnet) model_yml="configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml" ;;
solov2) model_yml="configs/solov2/solov2_r50_fpn_1x_coco.yml" ;;
jde) model_yml="configs/mot/jde/jde_darknet53_30e_1088x608.yml" ;;
fairmot) model_yml="configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml" ;;
*) echo "Undefined model_item"; exit 1;
esac
set_batch_size="TrainReader.batch_size=${batch_size}"
set_max_epoch="epoch=${max_epoch}"
set_log_iter="log_iter=1"
if [ ${fp_item} = "fp16" ]; then
set_fp_item="--fp16"
else
set_fp_item=" "
fi
case ${run_mode} in
sp) train_cmd="${python} -u tools/train.py -c ${model_yml} ${set_fp_item} \
-o ${set_batch_size} ${set_max_epoch} ${set_log_iter} ${set_optimizer_lr_sp}" ;;
mp) rm -rf mylog
train_cmd="${python} -m paddle.distributed.launch --log_dir=./mylog \
--gpus=${CUDA_VISIBLE_DEVICES} tools/train.py -c ${model_yml} ${set_fp_item} \
-o ${set_batch_size} ${set_max_epoch} ${set_log_iter} ${set_optimizer_lr_mp}"
log_parse_file="mylog/workerlog.0" ;;
*) echo "choose run_mode(sp or mp)"; exit 1;
esac
timeout 15m ${train_cmd} > ${log_file} 2>&1
if [ $? -ne 0 ];then
echo -e "${train_cmd}, FAIL"
export job_fail_flag=1
else
echo -e "${train_cmd}, SUCCESS"
export job_fail_flag=0
fi
kill -9 `ps -ef|grep 'python'|awk '{print $2}'`
if [ $run_mode = "mp" -a -d mylog ]; then
rm ${log_file}
cp mylog/workerlog.0 ${log_file}
fi
}
source ${BENCHMARK_ROOT}/scripts/run_model.sh # Parses performance data from benchmark-compliant logs via analysis.py; when integrating, download it from https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh. Comment this line out if you only need the raw training log, but re-enable it before submitting.
_set_params $@
# _train # Uncomment to produce the training log only, without parsing
_run # Defined in run_model.sh and calls _train internally; comment this line out if you only need the raw training log without integration, but re-enable it before submitting.
| PaddleDetection/benchmark/run_benchmark.sh/0 | {
"file_path": "PaddleDetection/benchmark/run_benchmark.sh",
"repo_id": "PaddleDetection",
"token_count": 2278
} | 15 |
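The skip_steps, keyword, and index values set in _set_params describe how the downstream analysis step extracts throughput from the training log. A rough Python sketch of that parsing logic, written here only to illustrate those parameters (the real analysis.py in the PaddlePaddle benchmark repo may differ), is:

import re


def parse_ips(log_file, keyword="ips:", skip_steps=10, index=1):
    """Average the throughput values that follow `keyword`, skipping warm-up steps."""
    values = []
    with open(log_file) as f:
        for line in f:
            if keyword not in line:
                continue
            # Take the `index`-th number after the keyword, e.g. "... ips: 12.34 images/s"
            numbers = re.findall(r"[-+]?\d*\.?\d+", line.split(keyword, 1)[1])
            if len(numbers) >= index:
                values.append(float(numbers[index - 1]))
    values = values[skip_steps:]  # drop the slow warm-up steps
    if not values:
        raise ValueError("no usable '%s' entries found in %s" % (keyword, log_file))
    return sum(values) / len(values)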