text | id | metadata | __index_level_0__
---|---|---|---
stringlengths 3–11.2M | stringlengths 15–188 | dict | int64 0–275
package org.springframework.boot.autoconfigure.orm.jpa;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link HibernateJpaAutoConfiguration}.
*/
@Generated
public class HibernateJpaAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'hibernateJpaAutoConfiguration'.
*/
public static BeanDefinition getHibernateJpaAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(HibernateJpaAutoConfiguration.class);
beanDefinition.setInstanceSupplier(HibernateJpaAutoConfiguration::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/HibernateJpaAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/HibernateJpaAutoConfiguration__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 225
} | 179 |
package org.springframework.boot.autoconfigure.task;
import java.lang.SuppressWarnings;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.boot.task.SimpleAsyncTaskSchedulerBuilder;
import org.springframework.boot.task.TaskSchedulerBuilder;
import org.springframework.boot.task.ThreadPoolTaskSchedulerBuilder;
/**
* Bean definitions for {@link TaskSchedulingConfigurations}.
*/
@Generated
public class TaskSchedulingConfigurations__BeanDefinitions {
/**
* Bean definitions for {@link TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration}.
*/
@Generated
public static class ThreadPoolTaskSchedulerBuilderConfiguration {
/**
* Get the bean definition for 'threadPoolTaskSchedulerBuilderConfiguration'.
*/
public static BeanDefinition getThreadPoolTaskSchedulerBuilderConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration.class);
beanDefinition.setInstanceSupplier(TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'threadPoolTaskSchedulerBuilder'.
*/
private static BeanInstanceSupplier<ThreadPoolTaskSchedulerBuilder> getThreadPoolTaskSchedulerBuilderInstanceSupplier(
) {
return BeanInstanceSupplier.<ThreadPoolTaskSchedulerBuilder>forFactoryMethod(TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration.class, "threadPoolTaskSchedulerBuilder", TaskSchedulingProperties.class, ObjectProvider.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration.class).threadPoolTaskSchedulerBuilder(args.get(0), args.get(1), args.get(2)));
}
/**
* Get the bean definition for 'threadPoolTaskSchedulerBuilder'.
*/
public static BeanDefinition getThreadPoolTaskSchedulerBuilderBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ThreadPoolTaskSchedulerBuilder.class);
beanDefinition.setInstanceSupplier(getThreadPoolTaskSchedulerBuilderInstanceSupplier());
return beanDefinition;
}
}
/**
* Bean definitions for {@link TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration}.
*/
@Generated
public static class TaskSchedulerBuilderConfiguration {
/**
* Get the bean definition for 'taskSchedulerBuilderConfiguration'.
*/
public static BeanDefinition getTaskSchedulerBuilderConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration.class);
beanDefinition.setInstanceSupplier(TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'taskSchedulerBuilder'.
*/
@SuppressWarnings("removal")
private static BeanInstanceSupplier<TaskSchedulerBuilder> getTaskSchedulerBuilderInstanceSupplier(
) {
return BeanInstanceSupplier.<TaskSchedulerBuilder>forFactoryMethod(TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration.class, "taskSchedulerBuilder", TaskSchedulingProperties.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration.class).taskSchedulerBuilder(args.get(0), args.get(1)));
}
/**
* Get the bean definition for 'taskSchedulerBuilder'.
*/
@SuppressWarnings("removal")
public static BeanDefinition getTaskSchedulerBuilderBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulerBuilder.class);
beanDefinition.setInstanceSupplier(getTaskSchedulerBuilderInstanceSupplier());
return beanDefinition;
}
}
/**
* Bean definitions for {@link TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration}.
*/
@Generated
public static class SimpleAsyncTaskSchedulerBuilderConfiguration {
/**
* Get the bean instance supplier for 'org.springframework.boot.autoconfigure.task.TaskSchedulingConfigurations$SimpleAsyncTaskSchedulerBuilderConfiguration'.
*/
private static BeanInstanceSupplier<TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration> getSimpleAsyncTaskSchedulerBuilderConfigurationInstanceSupplier(
) {
return BeanInstanceSupplier.<TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration>forConstructor(TaskSchedulingProperties.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> new TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration(args.get(0), args.get(1)));
}
/**
* Get the bean definition for 'simpleAsyncTaskSchedulerBuilderConfiguration'.
*/
public static BeanDefinition getSimpleAsyncTaskSchedulerBuilderConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration.class);
beanDefinition.setInstanceSupplier(getSimpleAsyncTaskSchedulerBuilderConfigurationInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'simpleAsyncTaskSchedulerBuilder'.
*/
private static BeanInstanceSupplier<SimpleAsyncTaskSchedulerBuilder> getSimpleAsyncTaskSchedulerBuilderInstanceSupplier(
) {
return BeanInstanceSupplier.<SimpleAsyncTaskSchedulerBuilder>forFactoryMethod(TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration.class, "simpleAsyncTaskSchedulerBuilder")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration.class).simpleAsyncTaskSchedulerBuilder());
}
/**
* Get the bean definition for 'simpleAsyncTaskSchedulerBuilder'.
*/
public static BeanDefinition getSimpleAsyncTaskSchedulerBuilderBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(SimpleAsyncTaskSchedulerBuilder.class);
beanDefinition.setInstanceSupplier(getSimpleAsyncTaskSchedulerBuilderInstanceSupplier());
return beanDefinition;
}
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/task/TaskSchedulingConfigurations__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/task/TaskSchedulingConfigurations__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 2000
} | 180 |
package org.springframework.boot.autoconfigure.web.servlet;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory;
/**
* Bean definitions for {@link ServletWebServerFactoryConfiguration}.
*/
@Generated
public class ServletWebServerFactoryConfiguration__BeanDefinitions {
/**
* Bean definitions for {@link ServletWebServerFactoryConfiguration.EmbeddedTomcat}.
*/
@Generated
public static class EmbeddedTomcat {
/**
* Get the bean definition for 'embeddedTomcat'.
*/
public static BeanDefinition getEmbeddedTomcatBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ServletWebServerFactoryConfiguration.EmbeddedTomcat.class);
beanDefinition.setInstanceSupplier(ServletWebServerFactoryConfiguration.EmbeddedTomcat::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'tomcatServletWebServerFactory'.
*/
private static BeanInstanceSupplier<TomcatServletWebServerFactory> getTomcatServletWebServerFactoryInstanceSupplier(
) {
return BeanInstanceSupplier.<TomcatServletWebServerFactory>forFactoryMethod(ServletWebServerFactoryConfiguration.EmbeddedTomcat.class, "tomcatServletWebServerFactory", ObjectProvider.class, ObjectProvider.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(ServletWebServerFactoryConfiguration.EmbeddedTomcat.class).tomcatServletWebServerFactory(args.get(0), args.get(1), args.get(2)));
}
/**
* Get the bean definition for 'tomcatServletWebServerFactory'.
*/
public static BeanDefinition getTomcatServletWebServerFactoryBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TomcatServletWebServerFactory.class);
beanDefinition.setInstanceSupplier(getTomcatServletWebServerFactoryInstanceSupplier());
return beanDefinition;
}
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/web/servlet/ServletWebServerFactoryConfiguration__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/web/servlet/ServletWebServerFactoryConfiguration__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 685
} | 181 |
package org.springframework.data.jpa.mapping;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.data.jpa.repository.config.JpaMetamodelMappingContextFactoryBean;
/**
* Bean definitions for {@link JpaMetamodelMappingContext}.
*/
@Generated
public class JpaMetamodelMappingContext__BeanDefinitions {
/**
* Get the bean definition for 'jpaMappingContext'.
*/
public static BeanDefinition getJpaMappingContextBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(JpaMetamodelMappingContextFactoryBean.class);
beanDefinition.setLazyInit(true);
beanDefinition.setInstanceSupplier(JpaMetamodelMappingContextFactoryBean::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/data/jpa/mapping/JpaMetamodelMappingContext__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/data/jpa/mapping/JpaMetamodelMappingContext__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 271
} | 182 |
/ Header Record For PersistentHashMapValueStorage | pgvector/build/kotlin/compileKotlin/cacheable/caches-jvm/jvm/kotlin/package-parts.tab.values.at/0 | {
"file_path": "pgvector/build/kotlin/compileKotlin/cacheable/caches-jvm/jvm/kotlin/package-parts.tab.values.at",
"repo_id": "pgvector",
"token_count": 20
} | 183 |
{
"bundles": [],
"resources": {
"includes": [
{
"condition": {
"typeReachable": "com.sun.jna.Native"
},
"pattern": "\\Qcom/sun/jna/linux-aarch64/libjnidispatch.so\\E"
},
{
"condition": {
"typeReachable": "com.sun.jna.Native"
},
"pattern": "\\Qcom/sun/jna/linux-x86-64/libjnidispatch.so\\E"
},
{
"condition": {
"typeReachable": "com.sun.jna.Native"
},
"pattern": "\\Qcom/sun/jna/win32-x86-64/jnidispatch.dll\\E"
},
{
"condition": {
"typeReachable": "com.sun.jna.Native"
},
"pattern": "\\Qcom/sun/jna/darwin-aarch64/libjnidispatch.jnilib\\E"
},
{
"condition": {
"typeReachable": "com.sun.jna.Native"
},
"pattern": "\\Qcom/sun/jna/darwin-x86-64/libjnidispatch.jnilib\\E"
}
]
}
}
| pgvector/build/native-reachability-metadata/META-INF/native-image/net.java.dev.jna/jna/5.13.0/resource-config.json/0 | {
"file_path": "pgvector/build/native-reachability-metadata/META-INF/native-image/net.java.dev.jna/jna/5.13.0/resource-config.json",
"repo_id": "pgvector",
"token_count": 534
} | 184 |
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@if "%DEBUG%"=="" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%"=="" set DIRNAME=.
@rem This is normally unused
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute
echo. 1>&2
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto execute
echo. 1>&2
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
echo. 1>&2
echo Please set the JAVA_HOME variable in your environment to match the 1>&2
echo location of your Java installation. 1>&2
goto fail
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
:end
@rem End local scope for the variables with windows NT shell
if %ERRORLEVEL% equ 0 goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
set EXIT_CODE=%ERRORLEVEL%
if %EXIT_CODE% equ 0 set EXIT_CODE=1
if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
exit /b %EXIT_CODE%
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
| pgvector/gradlew.bat/0 | {
"file_path": "pgvector/gradlew.bat",
"repo_id": "pgvector",
"token_count": 972
} | 185 |
# Contributor Guide
*Feature PRs, bug reports, documentation additions, and any other kind of contribution to SWIFT are welcome!*
## Table of Contents
- [Code of Conduct](#-code-of-conduct)
- [Contribution Process](#-contribution-process)
- [Resource Support](#-resource-support)
## 📖 Code of Conduct
Please see our [Code of Conduct](./CODE_OF_CONDUCT.md).
## 🔁 Contribution Process
### What We Need
- ROADMAP: We publish a [ROADMAP](./ROADMAP.md) for each SWIFT iteration; contributors can check it to learn about our development progress and plans. Features marked as **unassigned** can be claimed and developed.
- New techniques and new models: SWIFT needs to support more open-source models and datasets, as well as new techniques we have not yet noticed. If you are interested in this, feel free to submit a PR.
- Technical evangelism: If you are interested in technical evangelism, you are welcome to write tutorials, documentation, or videos for us on any website and send us the links.
- Community contributions: You can write technical articles related to SWIFT and submit them to us. Once reviewed and approved, we will publish them on ModelScope's official accounts (Zhihu, WeChat official account, etc.) with your name credited.
### Incentives
- We will issue electronic certificates on behalf of the ModelScope community to recognize your contributions.
- We will send you small gifts of ModelScope community merchandise.
- We will provide free A10 compute during development; see the [Resource Support](#-resource-support) section for details.
### Submitting PRs (Pull Requests)
All feature development happens on GitHub via a fork-then-PR workflow.
1. Fork: Go to the [SWIFT](https://github.com/modelscope/swift) page and click the **Fork button**. A clone of the SWIFT repository will be created under your personal organization.
2. Clone: Clone the repository created in step 1 to your local machine and **create a new branch** for development. During development, click the **Sync Fork button** regularly to stay in sync with the `main` branch and avoid stale code and conflicts.
3. Submit a PR: After development and testing, push the code to your remote branch. On GitHub, open the **Pull Requests page**, create a new PR, choose your branch as the source and `modelscope/swift:main` as the target.
4. Write a description: A good feature description in the PR is necessary so that reviewers know what you changed.
5. Review: We want merged code to be concise and efficient, so we may raise questions and discuss them. Please note that any issue raised during review targets the code itself, not you personally. Once all issues have been discussed and resolved, your code will be approved.
### Code Conventions and Development Practices
SWIFT follows established variable-naming and development conventions. Please follow them as much as possible during development (see the sketch below for an illustration).
1. Variable names are separated by underscores (snake_case); class names capitalize the first letter of every word (CapWords).
2. All Python indentation uses four spaces instead of a tab.
3. Use well-known open-source libraries; avoid closed-source or unstable open-source libraries, and avoid reinventing the wheel.
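For illustration, a short, purely hypothetical snippet that follows these conventions might look like this:
```python
# Hypothetical example, only meant to illustrate the conventions above.
class DatasetLoader:  # class names use CapWords
    def __init__(self, dataset_name: str):
        self.dataset_name = dataset_name  # variables use snake_case

    def load_train_split(self) -> list:
        # Indentation is always four spaces, never a tab.
        train_samples = [f'{self.dataset_name}-{i}' for i in range(3)]
        return train_samples
```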
SWIFT runs two kinds of tests after a PR is submitted:
- Code Lint tests: static code-style checks. To make sure this test passes, please run code lint locally first:
```shell
pip install pre-commit
# Inside the swift folder
pre-commit run --all-files
# Fix the errors reported by pre-commit until all checks pass
```
- CI Tests: smoke tests and unit tests; see the next section.
### Running CI Tests
Before submitting a PR, please make sure your development code is protected by test cases, for example smoke tests for new features or unit tests for various edge cases. Reviewers will also pay attention to this during code review. In addition, a dedicated service runs CI Tests, executing all test cases; code can only be merged after the test cases pass.
Also, some important tests are skipped because they take too long to run. To make sure your logic is correct, you can run this test locally:
```shell
python tests/llm/test_run.py
```
Please make sure this test passes.
## ✅ Resource Support
SWIFT provides resource support for developers, including free GPU compute. If you need it, please contact us by email ([contact@modelscope.cn](mailto:contact@modelscope.cn)) or join our WeChat group:
<p align="left">
<img src="asset/wechat.png" width="250" style="display: inline-block;">
</p>
| swift/CONTRIBUTING_CN.md/0 | {
"file_path": "swift/CONTRIBUTING_CN.md",
"repo_id": "swift",
"token_count": 2752
} | 186 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/college_biology/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/college_biology/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 187 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/high_school_european_history/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/high_school_european_history/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 188 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/machine_learning/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/machine_learning/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 189 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/sociology/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/sociology/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 190 |
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd
| swift/docs/make.bat/0 | {
"file_path": "swift/docs/make.bat",
"repo_id": "swift",
"token_count": 314
} | 191 |
## 🔥SCEdit
SCEdit, proposed by the Alibaba TongYi Vision Intelligence Lab, is an efficient generative fine-tuning framework. It not only supports fine-tuning for downstream text-to-image tasks, **saving 30%-50% of training GPU memory compared to LoRA** and enabling fast transfer to specific generation scenarios, but can also **be extended directly to controllable image generation tasks, requiring only 7.9% of the parameters of ControlNet conditional generation while saving 30% of GPU memory**. It supports conditional generation tasks such as edge maps, depth maps, segmentation maps, pose, color maps, and image inpainting.
We trained on the 3D-style data from the [style transfer dataset](https://modelscope.cn/datasets/damo/style_custom_dataset/dataPeview) and tested with the same `Prompt: A boy in a camouflage jacket with a scarf`. The qualitative and quantitative results are as follows:
| Method | bs | ep | Target Module | Param. (M) | Mem. (MiB) | 3D style |
| --------- | ---- | ---- | ------------- | ------------- | ---------- | ------------------------------------------------------------ |
| LoRA/r=64 | 1 | 50 | q/k/v/out/mlp | 23.94 (2.20%) | 8440MiB | <img src="https://intranetproxy.alipay.com/skylark/lark/0/2023/png/167218/1703665229562-0f33bbb0-c492-41b4-9f37-3ae720dca80d.png" alt="img" style="zoom:20%;" /> |
| SCEdit | 1 | 50 | up_blocks | 19.68 (1.81%) | 7556MiB | <img src="https://intranetproxy.alipay.com/skylark/lark/0/2023/png/167218/1703665933913-74b98741-3b57-46a4-9871-539df3a0112c.png" alt="img" style="zoom:20%;" /> |
| LoRA/r=64 | 10 | 100 | q/k/v/out/mlp | 23.94 (2.20%) | 26300MiB | <img src="https://intranetproxy.alipay.com/skylark/lark/0/2023/png/167218/1703750608529-de20d0e7-bf9c-4928-8e59-73cc54f2c8d7.png" alt="img" style="zoom:20%;" /> |
| SCEdit | 10 | 100 | up_blocks | 19.68 (1.81%) | 18634MiB | <img src="https://intranetproxy.alipay.com/skylark/lark/0/2023/png/167218/1703663033092-94492e44-341f-4259-9df4-13c168e3b5d6.png" alt="img" style="zoom:20%;" /> |
| LoRA/r=64 | 30 | 200 | q/k/v/out/mlp | 23.94 (2.20%) | 69554MiB | <img src="https://intranetproxy.alipay.com/skylark/lark/0/2023/png/167218/1703750626635-2e368d7b-5e99-4a06-b189-8615f302bcd7.png" alt="img" style="zoom:20%;" /> |
| SCEdit | 30 | 200 | up_blocks | 19.68 (1.81%) | 43350MiB | <img src="https://intranetproxy.alipay.com/skylark/lark/0/2023/png/167218/1703662246942-1102b1f4-93ab-4653-b943-3302f2a5259e.png" alt="img" style="zoom:20%;" /> |
To run a training job with SCEdit and reproduce the results above:
```shell
# First complete the installation steps in the section below
cd examples/pytorch/multi_modal/notebook
python text_to_image_synthesis.py
```
| swift/docs/source/GetStarted/SCEdit.md/0 | {
"file_path": "swift/docs/source/GetStarted/SCEdit.md",
"repo_id": "swift",
"token_count": 1494
} | 192 |
# NPU Training Best Practice
Authors: [chuanzhubin](https://github.com/chuanzhubin), [jintao](https://github.com/Jintao-Huang)
## Table of Contents
- [Environment Preparation](#environment-preparation)
- [Fine-tuning](#fine-tuning)
- [Inference](#inference)
- [Deployment](#deployment)
## Environment Preparation
Experimental environment: 8 * Ascend 910B3 64G (devices provided by [@chuanzhubin](https://github.com/chuanzhubin); thanks for supporting ModelScope and SWIFT~)
```shell
# Create a new conda virtual environment (optional)
conda create -n swift-npu python=3.10 -y
conda activate swift-npu
# Set the pip global mirror (optional, speeds up downloads)
pip config set global.index-url https://mirrors.aliyun.com/pypi/simple/
# Install ms-swift (installing from source is currently recommended; after release you can pip install directly)
git clone https://github.com/modelscope/swift.git
cd swift
pip install -e '.[llm]'
# Install torch-npu
pip install torch-npu decorator
# If you want to use deepspeed (limits GPU memory usage; training speed will drop somewhat)
pip install deepspeed
# Environment alignment (usually not needed. If you get errors, you can run the code below; the repo is tested with the latest environment)
pip install -r requirements/framework.txt -U
pip install -r requirements/llm.txt -U
```
Check that the environment is installed correctly and that the NPU can be loaded properly:
```python
from transformers.utils import is_torch_npu_available
import torch
print(is_torch_npu_available()) # True
print(torch.npu.device_count()) # 8
print(torch.randn(10, device='npu:0'))
```
Check the P2P connections of the NPUs; here every NPU is interconnected with the other NPUs through 7 HCCS links:
```shell
(valle) root@valle:~/src# npu-smi info -t topo
NPU0 NPU1 NPU2 NPU3 NPU4 NPU5 NPU6 NPU7 CPU Affinity
NPU0 X HCCS HCCS HCCS HCCS HCCS HCCS HCCS 144-167
NPU1 HCCS X HCCS HCCS HCCS HCCS HCCS HCCS 144-167
NPU2 HCCS HCCS X HCCS HCCS HCCS HCCS HCCS 96-119
NPU3 HCCS HCCS HCCS X HCCS HCCS HCCS HCCS 96-119
NPU4 HCCS HCCS HCCS HCCS X HCCS HCCS HCCS 0-23
NPU5 HCCS HCCS HCCS HCCS HCCS X HCCS HCCS 0-23
NPU6 HCCS HCCS HCCS HCCS HCCS HCCS X HCCS 48-71
NPU7 HCCS HCCS HCCS HCCS HCCS HCCS HCCS X 48-71
Legend:
X = Self
SYS = Path traversing PCIe and NUMA nodes. Nodes are connected through SMP, such as QPI, UPI.
PHB = Path traversing PCIe and the PCIe host bridge of a CPU.
PIX = Path traversing a single PCIe switch
PXB = Path traversing multipul PCIe switches
HCCS = Connection traversing HCCS.
NA = Unknown relationship.
```
Check the NPU status; a detailed explanation of the npu-smi command can be found in the [official documentation](https://support.huawei.com/enterprise/zh/doc/EDOC1100079287/10dcd668)
```shell
(valle) root@valle:~/src# npu-smi info
+------------------------------------------------------------------------------------------------+
| npu-smi 24.1.rc1.b030 Version: 24.1.rc1.b030 |
+---------------------------+---------------+----------------------------------------------------+
| NPU Name | Health | Power(W) Temp(C) Hugepages-Usage(page)|
| Chip | Bus-Id | AICore(%) Memory-Usage(MB) HBM-Usage(MB) |
+===========================+===============+====================================================+
| 0 910B3 | OK | 101.8 43 0 / 0 |
| 0 | 0000:C1:00.0 | 0 0 / 0 3318 / 65536 |
+===========================+===============+====================================================+
| 1 910B3 | OK | 92.0 39 0 / 0 |
| 0 | 0000:C2:00.0 | 0 0 / 0 3314 / 65536 |
+===========================+===============+====================================================+
| 2 910B3 | OK | 102.0 40 0 / 0 |
| 0 | 0000:81:00.0 | 0 0 / 0 3314 / 65536 |
+===========================+===============+====================================================+
| 3 910B3 | OK | 99.8 40 0 / 0 |
| 0 | 0000:82:00.0 | 0 0 / 0 3314 / 65536 |
+===========================+===============+====================================================+
| 4 910B3 | OK | 98.6 45 0 / 0 |
| 0 | 0000:01:00.0 | 0 0 / 0 3314 / 65536 |
+===========================+===============+====================================================+
| 5 910B3 | OK | 99.7 44 0 / 0 |
| 0 | 0000:02:00.0 | 0 0 / 0 3314 / 65536 |
+===========================+===============+====================================================+
| 6 910B3 | OK | 103.8 45 0 / 0 |
| 0 | 0000:41:00.0 | 0 0 / 0 3314 / 65536 |
+===========================+===============+====================================================+
| 7 910B3 | OK | 98.2 44 0 / 0 |
| 0 | 0000:42:00.0 | 0 0 / 0 3315 / 65536 |
+===========================+===============+====================================================+
```
## Fine-tuning
The following describes LoRA fine-tuning; for full-parameter fine-tuning, simply set `--sft_type full`.
| Model Size | Number of NPUs | Deepspeed Type | Max GPU Memory Usage |
|------|-------|-------------|-----------|
| 7B | 1 | None | 1 * 28 GB |
| 7B | 4 | None | 4 * 22 GB |
| 7B | 4 | zero2 | 4 * 28 GB |
| 7B | 4 | zero3 | 4 * 22 GB |
| 7B | 8 | None | 8 * 22 GB |
| 14B | 1 | None | 1 * 45 GB |
| 14B | 8 | None | 8 * 51 GB |
| 14B | 8 | zero2 | 8 * 49 GB |
| 14B | 8 | zero3 | 8 * 31 GB |
### Single-Card Training
Start single-card fine-tuning with the following command: (Note: if NaN values appear during fine-tuning, please set `--dtype fp32`.)
```shell
# Experimental environment: Ascend 910B3
# GPU memory requirement: 28 GB
# Runtime: 8 hours
ASCEND_RT_VISIBLE_DEVICES=0 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
```
### Data-Parallel Training
We use 4 of the cards for DDP training:
```shell
# Experimental environment: 4 * Ascend 910B3
# GPU memory requirement: 4 * 22 GB
# Runtime: 2 hours
NPROC_PER_NODE=4 \
ASCEND_RT_VISIBLE_DEVICES=0,1,2,3 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
```
### Deepspeed Training
ZeRO2:
```shell
# Experimental environment: 4 * Ascend 910B3
# GPU memory requirement: 4 * 28 GB
# Runtime: 3.5 hours
NPROC_PER_NODE=4 \
ASCEND_RT_VISIBLE_DEVICES=0,1,2,3 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
--deepspeed default-zero2 \
```
ZeRO3:
```shell
# Experimental environment: 4 * Ascend 910B3
# GPU memory requirement: 4 * 22 GB
# Runtime: 8.5 hours
NPROC_PER_NODE=4 \
ASCEND_RT_VISIBLE_DEVICES=0,1,2,3 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
--deepspeed default-zero3 \
```
## Inference
Original model:
```shell
ASCEND_RT_VISIBLE_DEVICES=0 swift infer --model_type qwen1half-7b-chat
```
After LoRA fine-tuning:
```shell
ASCEND_RT_VISIBLE_DEVICES=0 swift infer --ckpt_dir xxx/checkpoint-xxx --load_dataset_config true
# merge-lora and infer
ASCEND_RT_VISIBLE_DEVICES=0 swift export --ckpt_dir xx/checkpoint-xxx --merge_lora true
ASCEND_RT_VISIBLE_DEVICES=0 swift infer --ckpt_dir xxx/checkpoint-xxx-merged --load_dataset_config true
```
## Deployment
NPU does not support using vllm for inference/deployment acceleration, but deployment with native PyTorch is possible.
Original model:
```shell
ASCEND_RT_VISIBLE_DEVICES=0 swift deploy --model_type qwen1half-7b-chat
```
After LoRA fine-tuning:
```shell
ASCEND_RT_VISIBLE_DEVICES=0 swift deploy --ckpt_dir xxx/checkpoint-xxx --load_dataset_config true
# merge-lora and deploy
ASCEND_RT_VISIBLE_DEVICES=0 swift export --ckpt_dir xx/checkpoint-xxx --merge_lora true
ASCEND_RT_VISIBLE_DEVICES=0 swift deploy --ckpt_dir xxx/checkpoint-xxx-merged --load_dataset_config true
```
| swift/docs/source/LLM/NPU推理与微调最佳实践.md/0 | {
"file_path": "swift/docs/source/LLM/NPU推理与微调最佳实践.md",
"repo_id": "swift",
"token_count": 5172
} | 193 |
## Multi-Modal Documentation
### 📚 Tutorials
1. [MLLM Deployment Documentation](MLLM部署文档.md)
### ⭐️ Best Practices Series
A single round of dialogue can contain multiple images (or no image):
1. [Qwen-VL Best Practice](qwen-vl最佳实践.md)
2. [Qwen-Audio Best Practice](qwen-audio最佳实践.md)
3. [Deepseek-VL Best Practice](deepseek-vl最佳实践.md)
4. [Internlm-Xcomposer2 Best Practice](internlm-xcomposer2最佳实践.md)
5. [Phi3-Vision Best Practice](phi3-vision最佳实践.md)
A single round of dialogue can contain only one image (or possibly no image):
1. [Llava Best Practice](llava最佳实践.md)
2. [Yi-VL Best Practice](yi-vl最佳实践.md)
3. [mPLUG-Owl2 Best Practice](mplug-owl2最佳实践.md)
The entire dialogue revolves around one image (or possibly no image):
1. [CogVLM Best Practice](cogvlm最佳实践.md), [CogVLM2 Best Practice](cogvlm2最佳实践.md), [glm4v Best Practice](glm4v最佳实践.md)
2. [MiniCPM-V Best Practice](minicpm-v最佳实践.md), [MiniCPM-V-2 Best Practice](minicpm-v-2最佳实践.md), [MiniCPM-V-2.5 Best Practice](minicpm-v-2.5最佳实践.md)
3. [InternVL-Chat-V1.5 Best Practice](internvl最佳实践.md)
| swift/docs/source/Multi-Modal/index.md/0 | {
"file_path": "swift/docs/source/Multi-Modal/index.md",
"repo_id": "swift",
"token_count": 744
} | 194 |
swift.trainers
==============
.. automodule:: swift.trainers
.. currentmodule:: swift.trainers
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
trainers.Seq2SeqTrainer
trainers.Trainer
| swift/docs/source/api/swift.trainers.rst/0 | {
"file_path": "swift/docs/source/api/swift.trainers.rst",
"repo_id": "swift",
"token_count": 91
} | 195 |
# HuggingFace Eco-compatibility
By default, we use models and datasets from [ModelScope](https://modelscope.cn/my/overview) for fine-tuning and inference. However, considering that overseas users are more familiar with the [HuggingFace](https://huggingface.co/) ecosystem, we have made it compatible with HuggingFace.
To enable HuggingFace compatibility, you need to set the environment variable `USE_HF=1`. Supported HuggingFace models and datasets can be found in the [Supported Models and Datasets](Supported-models-datasets.md). Note that some datasets are only supported in the ModelScope environment.
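If you are working inside Python rather than from the shell, the same switch can be applied programmatically. The snippet below is a minimal sketch and not part of the original document: it assumes that `USE_HF` is read when `swift.llm` is imported, and it reuses the standard `get_model_tokenizer`/`get_template`/`inference` helpers shown elsewhere in these docs.
```python
import os
# Set the flag before importing swift so that the HuggingFace hub is used
# for downloading weights (assumed to be read at import/load time).
os.environ['USE_HF'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from swift.llm import (
    ModelType, get_model_tokenizer, get_default_template_type,
    get_template, inference
)

model_type = ModelType.qwen1half_7b_chat
template_type = get_default_template_type(model_type)
model, tokenizer = get_model_tokenizer(model_type, model_kwargs={'device_map': 'auto'})
template = get_template(template_type, tokenizer)

query = 'Where is the capital of Zhejiang?'
response, history = inference(model, template, query)
print(f'response: {response}')
```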
Here is an example inference script for qwen1.5-7b-chat:
```shell
# Experimental Environment: A10, 3090, V100
USE_HF=1 CUDA_VISIBLE_DEVICES=0 swift infer --model_type qwen1half-7b-chat
```
Fine-tuning script:
```shell
# Experimental Environment: 2 * A100
# GPU Memory Requirement: 2 * 30GB
USE_HF=1 \
NPROC_PER_NODE=2 \
CUDA_VISIBLE_DEVICES=0,1 \
swift sft \
--model_type qwen1half-7b-chat \
--dataset blossom-math-zh \
--num_train_epochs 5 \
--sft_type lora \
--output_dir output \
```
Please refer to the other documents for inference and deployment after fine-tuning.
| swift/docs/source_en/LLM/Compat-HF.md/0 | {
"file_path": "swift/docs/source_en/LLM/Compat-HF.md",
"repo_id": "swift",
"token_count": 377
} | 196 |
# VLLM Inference Acceleration and Deployment
## Table of Contents
- [Environment Preparation](#environment-preparation)
- [Inference Acceleration](#inference-acceleration)
- [Web-UI Acceleration](#web-ui-acceleration)
- [Deployment](#deployment)
## Environment Preparation
GPU devices: A10, 3090, V100, A100 are all supported.
```bash
# Set pip global mirror (speeds up downloads)
pip config set global.index-url https://mirrors.aliyun.com/pypi/simple/
# Install ms-swift
pip install 'ms-swift[llm]' -U
# vllm version corresponds to cuda version, please select version according to `https://docs.vllm.ai/en/latest/getting_started/installation.html`
pip install vllm
pip install openai -U
# Environment alignment (usually not needed. If you get errors, you can run the code below, the repo uses the latest environment for testing)
pip install -r requirements/framework.txt -U
pip install -r requirements/llm.txt -U
```
## Inference Acceleration
vllm does not support bnb quantized models. The models supported by vllm can be found in [Supported Models](Supported-models-datasets.md#Models).
### qwen-7b-chat
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
ModelType, get_vllm_engine, get_default_template_type,
get_template, inference_vllm
)
model_type = ModelType.qwen_7b_chat
llm_engine = get_vllm_engine(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
# Similar to `transformers.GenerationConfig` interface
llm_engine.generation_config.max_new_tokens = 256
request_list = [{'query': 'Hello!'}, {'query': 'Where is the capital of Zhejiang?'} ]
resp_list = inference_vllm(llm_engine, template, request_list)
for request, resp in zip(request_list, resp_list):
print(f"query: {request['query']}")
print(f"response: {resp['response']}")
history1 = resp_list[1]['history']
request_list = [{'query': 'What delicious food is there', 'history': history1}]
resp_list = inference_vllm(llm_engine, template, request_list)
for request, resp in zip(request_list, resp_list):
print(f"query: {request['query']}")
print(f"response: {resp['response']}")
print(f"history: {resp['history']}")
"""Out[0]
query: Hello!
response: Hello! I'm happy to be of service. Is there anything I can help you with?
query: Where is the capital of Zhejiang?
response: Hangzhou is the capital of Zhejiang Province.
query: What delicious food is there
response: Hangzhou is a city of gastronomy, with many famous dishes and snacks such as West Lake Vinegar Fish, Dongpo Pork, Beggar's Chicken, etc. In addition, Hangzhou has many snack shops where you can taste a variety of local delicacies.
history: [('Where is the capital of Zhejiang?', 'Hangzhou is the capital of Zhejiang Province.'), ('What delicious food is there', "Hangzhou is a city of gastronomy, with many famous dishes and snacks such as West Lake Vinegar Fish, Dongpo Pork, Beggar's Chicken, etc. In addition, Hangzhou has many snack shops where you can taste a variety of local delicacies.")]
"""
```
### Streaming Output
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
ModelType, get_vllm_engine, get_default_template_type,
get_template, inference_stream_vllm
)
model_type = ModelType.qwen_7b_chat
llm_engine = get_vllm_engine(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
# Similar to `transformers.GenerationConfig` interface
llm_engine.generation_config.max_new_tokens = 256
request_list = [{'query': 'Hello!'}, {'query': 'Where is the capital of Zhejiang?'}]
gen = inference_stream_vllm(llm_engine, template, request_list)
query_list = [request['query'] for request in request_list]
print(f"query_list: {query_list}")
for resp_list in gen:
response_list = [resp['response'] for resp in resp_list]
print(f'response_list: {response_list}')
history1 = resp_list[1]['history']
request_list = [{'query': 'What delicious food is there', 'history': history1}]
gen = inference_stream_vllm(llm_engine, template, request_list)
query = request_list[0]['query']
print(f"query: {query}")
for resp_list in gen:
response = resp_list[0]['response']
print(f'response: {response}')
history = resp_list[0]['history']
print(f'history: {history}')
"""Out[0]
query_list: ['Hello!', 'Where is the capital of Zhejiang?']
...
response_list: ['Hello! I'm happy to be of service. Is there anything I can help you with?', 'Hangzhou is the capital of Zhejiang Province.']
query: What delicious food is there
...
response: Hangzhou is a city of gastronomy, with many famous dishes and snacks such as West Lake Vinegar Fish, Dongpo Pork, Beggar's Chicken, etc. In addition, Hangzhou has many snack shops where you can taste a variety of local delicacies.
history: [('Where is the capital of Zhejiang?', 'Hangzhou is the capital of Zhejiang Province.'), ('What delicious food is there', "Hangzhou is a city of gastronomy, with many famous dishes and snacks such as West Lake Vinegar Fish, Dongpo Pork, Beggar's Chicken, etc. In addition, Hangzhou has many snack shops where you can taste a variety of local delicacies.")]
"""
```
### chatglm3
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
ModelType, get_vllm_engine, get_default_template_type,
get_template, inference_vllm
)
model_type = ModelType.chatglm3_6b
llm_engine = get_vllm_engine(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
# Similar to `transformers.GenerationConfig` interface
llm_engine.generation_config.max_new_tokens = 256
request_list = [{'query': 'Hello!'}, {'query': 'Where is the capital of Zhejiang?'}]
resp_list = inference_vllm(llm_engine, template, request_list)
for request, resp in zip(request_list, resp_list):
print(f"query: {request['query']}")
print(f"response: {resp['response']}")
history1 = resp_list[1]['history']
request_list = [{'query': 'What delicious food is there', 'history': history1}]
resp_list = inference_vllm(llm_engine, template, request_list)
for request, resp in zip(request_list, resp_list):
print(f"query: {request['query']}")
print(f"response: {resp['response']}")
print(f"history: {resp['history']}")
"""Out[0]
query: Hello!
response: Hello, I am an AI assistant. I'm very pleased to serve you! Do you have any questions I can help you answer?
query: Where is the capital of Zhejiang?
response: The capital of Zhejiang is Hangzhou.
query: What delicious food is there
response: Zhejiang has many delicious foods, some of the most famous ones include Longjing Shrimp from Hangzhou, Dongpo Pork, West Lake Vinegar Fish, Beggar's Chicken, etc. In addition, Zhejiang also has many specialty snacks and pastries, such as Tang Yuan and Nian Gao from Ningbo, stir-fried crab and Wenzhou meatballs from Wenzhou, etc.
history: [('Where is the capital of Zhejiang?', 'The capital of Zhejiang is Hangzhou.'), ('What delicious food is there', 'Zhejiang has many delicious foods, some of the most famous ones include Longjing Shrimp from Hangzhou, Dongpo Pork, West Lake Vinegar Fish, Beggar's Chicken, etc. In addition, Zhejiang also has many specialty snacks and pastries, such as Tang Yuan and Nian Gao from Ningbo, stir-fried crab and Wenzhou meatballs from Wenzhou, etc.')]
"""
```
### Using CLI
```bash
# qwen
CUDA_VISIBLE_DEVICES=0 swift infer --model_type qwen-7b-chat --infer_backend vllm
# yi
CUDA_VISIBLE_DEVICES=0 swift infer --model_type yi-6b-chat --infer_backend vllm
# gptq
CUDA_VISIBLE_DEVICES=0 swift infer --model_type qwen1half-7b-chat-int4 --infer_backend vllm
```
### Fine-tuned Models
**Single sample inference**:
For models fine-tuned using LoRA, you need to first [merge-lora](LLM-fine-tuning.md#merge-lora) to generate a complete checkpoint directory.
Models fine-tuned with full parameters can seamlessly use VLLM for inference acceleration.
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
ModelType, get_vllm_engine, get_default_template_type,
get_template, inference_vllm
)
ckpt_dir = 'vx-xxx/checkpoint-100-merged'
model_type = ModelType.qwen_7b_chat
template_type = get_default_template_type(model_type)
llm_engine = get_vllm_engine(model_type, model_id_or_path=ckpt_dir)
tokenizer = llm_engine.hf_tokenizer
template = get_template(template_type, tokenizer)
query = 'Hello'
resp = inference_vllm(llm_engine, template, [{'query': query}])[0]
print(f"response: {resp['response']}")
print(f"history: {resp['history']}")
```
**Using CLI**:
```bash
# merge LoRA incremental weights and use vllm for inference acceleration
# If you need quantization, you can specify `--quant_bits 4`.
CUDA_VISIBLE_DEVICES=0 swift export \
--ckpt_dir 'xxx/vx-xxx/checkpoint-xxx' --merge_lora true
# Evaluate using dataset
CUDA_VISIBLE_DEVICES=0 swift infer \
--ckpt_dir 'xxx/vx-xxx/checkpoint-xxx-merged' \
--infer_backend vllm \
--load_dataset_config true \
# Manual evaluation
CUDA_VISIBLE_DEVICES=0 swift infer \
--ckpt_dir 'xxx/vx-xxx/checkpoint-xxx-merged' \
--infer_backend vllm \
```
## Web-UI Acceleration
### Original Models
```bash
CUDA_VISIBLE_DEVICES=0 swift app-ui --model_type qwen-7b-chat --infer_backend vllm
```
### Fine-tuned Models
```bash
# merge LoRA incremental weights and use vllm as backend to build app-ui
# If you need quantization, you can specify `--quant_bits 4`.
CUDA_VISIBLE_DEVICES=0 swift export \
--ckpt_dir 'xxx/vx-xxx/checkpoint-xxx' --merge_lora true
CUDA_VISIBLE_DEVICES=0 swift app-ui --ckpt_dir 'xxx/vx-xxx/checkpoint-xxx-merged' --infer_backend vllm
```
## Deployment
Swift uses VLLM as the inference backend and is compatible with the OpenAI API style.
For server deployment command line arguments, refer to: [deploy command line arguments](Command-line-parameters.md#deploy-Parameters).
For OpenAI API arguments on the client side, refer to: https://platform.openai.com/docs/api-reference/introduction.
### Original Models
#### qwen-7b-chat
**Server side:**
```bash
CUDA_VISIBLE_DEVICES=0 swift deploy --model_type qwen-7b-chat
# Multi-GPU deployment
RAY_memory_monitor_refresh_ms=0 CUDA_VISIBLE_DEVICES=0,1,2,3 swift deploy --model_type qwen-7b-chat --tensor_parallel_size 4
```
**Client side:**
Test:
```bash
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "qwen-7b-chat",
"messages": [{"role": "user", "content": "What to do if I can't fall asleep at night?"}],
"max_tokens": 256,
"temperature": 0
}'
```
Synchronous client interface using swift:
```python
from swift.llm import get_model_list_client, XRequestConfig, inference_client
model_list = get_model_list_client()
model_type = model_list.data[0].id
print(f'model_type: {model_type}')
query = 'Where is the capital of Zhejiang?'
request_config = XRequestConfig(seed=42)
resp = inference_client(model_type, query, request_config=request_config)
response = resp.choices[0].message.content
print(f'query: {query}')
print(f'response: {response}')
history = [(query, response)]
query = 'What delicious food is there?'
request_config = XRequestConfig(stream=True, seed=42)
stream_resp = inference_client(model_type, query, history, request_config=request_config)
print(f'query: {query}')
print('response: ', end='')
for chunk in stream_resp:
print(chunk.choices[0].delta.content, end='', flush=True)
print()
"""Out[0]
model_type: qwen-7b-chat
query: Where is the capital of Zhejiang?
response: The capital of Zhejiang Province is Hangzhou.
query: What delicious food is there?
response: Hangzhou has many delicious foods, such as West Lake Vinegar Fish, Dongpo Pork, Longjing Shrimp, Beggar's Chicken, etc. In addition, Hangzhou also has many specialty snacks, such as West Lake Lotus Root Powder, Hangzhou Xiao Long Bao, Hangzhou You Tiao, etc.
"""
```
Asynchronous client interface using swift:
```python
import asyncio
from swift.llm import get_model_list_client, XRequestConfig, inference_client_async
model_list = get_model_list_client()
model_type = model_list.data[0].id
print(f'model_type: {model_type}')
query = 'Where is the capital of Zhejiang?'
request_config = XRequestConfig(seed=42)
resp = asyncio.run(inference_client_async(model_type, query, request_config=request_config))
response = resp.choices[0].message.content
print(f'query: {query}')
print(f'response: {response}')
async def _stream():
global query
history = [(query, response)]
query = 'What delicious food is there?'
request_config = XRequestConfig(stream=True, seed=42)
stream_resp = await inference_client_async(model_type, query, history, request_config=request_config)
print(f'query: {query}')
print('response: ', end='')
async for chunk in stream_resp:
print(chunk.choices[0].delta.content, end='', flush=True)
print()
asyncio.run(_stream())
"""Out[0]
model_type: qwen-7b-chat
query: Where is the capital of Zhejiang?
response: The capital of Zhejiang Province is Hangzhou.
query: What delicious food is there?
response: Hangzhou has many delicious foods, such as West Lake Vinegar Fish, Dongpo Pork, Longjing Shrimp, Beggar's Chicken, etc. In addition, Hangzhou also has many specialty snacks, such as West Lake Lotus Root Powder, Hangzhou Xiao Long Bao, Hangzhou You Tiao, etc.
"""
```
Using OpenAI (synchronous):
```python
from openai import OpenAI
client = OpenAI(
api_key='EMPTY',
base_url='http://localhost:8000/v1',
)
model_type = client.models.list().data[0].id
print(f'model_type: {model_type}')
query = 'Where is the capital of Zhejiang?'
messages = [{
'role': 'user',
'content': query
}]
resp = client.chat.completions.create(
model=model_type,
messages=messages,
seed=42)
response = resp.choices[0].message.content
print(f'query: {query}')
print(f'response: {response}')
# Streaming
messages.append({'role': 'assistant', 'content': response})
query = 'What delicious food is there?'
messages.append({'role': 'user', 'content': query})
stream_resp = client.chat.completions.create(
model=model_type,
messages=messages,
stream=True,
seed=42)
print(f'query: {query}')
print('response: ', end='')
for chunk in stream_resp:
print(chunk.choices[0].delta.content, end='', flush=True)
print()
"""Out[0]
model_type: qwen-7b-chat
query: Where is the capital of Zhejiang?
response: The capital of Zhejiang Province is Hangzhou.
query: What delicious food is there?
response: Hangzhou has many delicious foods, such as West Lake Vinegar Fish, Dongpo Pork, Longjing Shrimp, Beggar's Chicken, etc. In addition, Hangzhou also has many specialty snacks, such as West Lake Lotus Root Powder, Hangzhou Xiao Long Bao, Hangzhou You Tiao, etc.
"""
```
#### qwen-7b
**Server side:**
```bash
CUDA_VISIBLE_DEVICES=0 swift deploy --model_type qwen-7b
# Multi-GPU deployment
RAY_memory_monitor_refresh_ms=0 CUDA_VISIBLE_DEVICES=0,1,2,3 swift deploy --model_type qwen-7b --tensor_parallel_size 4
```
**Client side:**
Test:
```bash
curl http://localhost:8000/v1/completions \
-H "Content-Type: application/json" \
-d '{
"model": "qwen-7b",
"prompt": "Zhejiang -> Hangzhou\nAnhui -> Hefei\nSichuan ->",
"max_tokens": 32,
"temperature": 0.1,
"seed": 42
}'
```
Synchronous client interface using swift:
```python
from swift.llm import get_model_list_client, XRequestConfig, inference_client
model_list = get_model_list_client()
model_type = model_list.data[0].id
print(f'model_type: {model_type}')
query = 'Zhejiang -> Hangzhou\nAnhui -> Hefei\nSichuan ->'
request_config = XRequestConfig(max_tokens=32, temperature=0.1, seed=42)
resp = inference_client(model_type, query, request_config=request_config)
response = resp.choices[0].text
print(f'query: {query}')
print(f'response: {response}')
request_config.stream = True
stream_resp = inference_client(model_type, query, request_config=request_config)
print(f'query: {query}')
print('response: ', end='')
for chunk in stream_resp:
print(chunk.choices[0].text, end='', flush=True)
print()
"""Out[0]
model_type: qwen-7b
query: Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan ->
response: Chengdu
Guangdong -> Guangzhou
Jiangsu -> Nanjing
Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan -> Chengdu
query: Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan ->
response: Chengdu
Guangdong -> Guangzhou
Jiangsu -> Nanjing
Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan -> Chengdu
"""
```
Asynchronous client interface using swift:
```python
import asyncio
from swift.llm import get_model_list_client, XRequestConfig, inference_client_async
model_list = get_model_list_client()
model_type = model_list.data[0].id
print(f'model_type: {model_type}')
query = 'Zhejiang -> Hangzhou\nAnhui -> Hefei\nSichuan ->'
request_config = XRequestConfig(max_tokens=32, temperature=0.1, seed=42)
resp = asyncio.run(inference_client_async(model_type, query, request_config=request_config))
response = resp.choices[0].text
print(f'query: {query}')
print(f'response: {response}')
async def _stream():
request_config.stream = True
stream_resp = await inference_client_async(model_type, query, request_config=request_config)
print(f'query: {query}')
print('response: ', end='')
async for chunk in stream_resp:
print(chunk.choices[0].text, end='', flush=True)
print()
asyncio.run(_stream())
"""Out[0]
model_type: qwen-7b
query: Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan ->
response: Chengdu
Guangdong -> Guangzhou
Jiangsu -> Nanjing
Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan -> Chengdu
query: Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan ->
response: Chengdu
Guangdong -> Guangzhou
Jiangsu -> Nanjing
Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan -> Chengdu
"""
```
Using OpenAI (synchronous):
```python
from openai import OpenAI
client = OpenAI(
api_key='EMPTY',
base_url='http://localhost:8000/v1',
)
model_type = client.models.list().data[0].id
print(f'model_type: {model_type}')
query = 'Zhejiang -> Hangzhou\nAnhui -> Hefei\nSichuan ->'
kwargs = {'model': model_type, 'prompt': query, 'seed': 42, 'temperature': 0.1, 'max_tokens': 32}
resp = client.completions.create(**kwargs)
response = resp.choices[0].text
print(f'query: {query}')
print(f'response: {response}')
# Streaming
stream_resp = client.completions.create(stream=True, **kwargs)
response = resp.choices[0].text
print(f'query: {query}')
print('response: ', end='')
for chunk in stream_resp:
print(chunk.choices[0].text, end='', flush=True)
print()
"""Out[0]
model_type: qwen-7b
query: Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan ->
response: Chengdu
Guangdong -> Guangzhou
Jiangsu -> Nanjing
Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan -> Chengdu
query: Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan ->
response: Chengdu
Guangdong -> Guangzhou
Jiangsu -> Nanjing
Zhejiang -> Hangzhou
Anhui -> Hefei
Sichuan -> Chengdu
"""
```
### Fine-tuned Models
Server side:
```bash
# merge LoRA incremental weights and deploy
# If you need quantization, you can specify `--quant_bits 4`.
CUDA_VISIBLE_DEVICES=0 swift export \
--ckpt_dir 'xxx/vx-xxx/checkpoint-xxx' --merge_lora true
CUDA_VISIBLE_DEVICES=0 swift deploy --ckpt_dir 'xxx/vx-xxx/checkpoint-xxx-merged'
```
The example code for the client side is the same as the original models.
## Multiple LoRA Deployments
The current model deployment method now supports multiple LoRA deployments with `peft>=0.10.0`. The specific steps are:
- Ensure `merge_lora` is set to `False` during deployment.
- Use the `--lora_modules` argument, which can be referenced in the [command line documentation](Command-line-parameters.md).
- Specify the name of the LoRA tuner in the model field during inference.
Example:
```shell
# Assuming a LoRA model named Kakarot was trained from llama3-8b-instruct
# Server side
swift deploy --ckpt_dir /mnt/ckpt-1000 --infer_backend pt --lora_modules my_tuner=/mnt/my-tuner
# This loads two tuners, one is `default-lora` from `/mnt/ckpt-1000`, and the other is `my_tuner` from `/mnt/my-tuner`
# Client side
curl http://localhost:8000/v1/chat/completions -H "Content-Type: application/json" -d '{
"model": "my-tuner",
"messages": [{"role": "user", "content": "who are you?"}],
"max_tokens": 256,
"temperature": 0
}'
# resp: I am Kakarot...
# If model='llama3-8b-instruct' is specified instead, it will return I'm llama3..., i.e. the response of the original model
```
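The same tuner selection also works from the Python `openai` client. This is a small illustrative sketch, assuming the server above is running on `localhost:8000` and that a tuner was registered under the (hypothetical) name `my_tuner`:
```python
from openai import OpenAI

client = OpenAI(api_key='EMPTY', base_url='http://localhost:8000/v1')

# Each loaded tuner (plus the base model) is exposed as its own model id.
print([m.id for m in client.models.list().data])

resp = client.chat.completions.create(
    model='my_tuner',  # hypothetical tuner name registered via --lora_modules
    messages=[{'role': 'user', 'content': 'who are you?'}],
    max_tokens=256,
    temperature=0,
)
print(resp.choices[0].message.content)
```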
> [!NOTE]
>
> If the `--ckpt_dir` parameter is a LoRA path, that LoRA is loaded onto the `default-lora` tuner by default, and any other tuners need to be loaded manually through `lora_modules`.
## VLLM & LoRA
Models supported by VLLM & LoRA can be viewed at: https://docs.vllm.ai/en/latest/models/supported_models.html
### Setting Up LoRA
```shell
# Experimental environment: 4 * A100
# 4 * 30GB GPU memory
CUDA_VISIBLE_DEVICES=0,1,2,3 \
NPROC_PER_NODE=4 \
swift sft \
--model_type llama2-7b-chat \
--dataset self-cognition#500 sharegpt-gpt4:default#1000 \
--logging_steps 5 \
--max_length 4096 \
--learning_rate 1e-4 \
--output_dir output \
--lora_target_modules ALL \
--model_name 'Xiao Huang' \
--model_author ModelScope \
```
### Accelerating VLLM Inference
Inference:
```shell
CUDA_VISIBLE_DEVICES=0 swift infer \
--ckpt_dir output/llama2-7b-chat/vx-xxx/checkpoint-xxx \
--infer_backend vllm \
--vllm_enable_lora true
```
Inference results:
```python
"""
<<< who are you?
I am an artificial intelligence language model developed by ModelScope. I am designed to assist and communicate with users in a helpful and respectful manner. I can answer questions, provide information, and engage in conversation. How can I help you?
"""
```
Single sample inference:
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import torch
from swift.llm import (
ModelType, get_vllm_engine, get_default_template_type,
get_template, inference_stream_vllm, LoRARequest, inference_vllm
)
lora_checkpoint = 'output/llama2-7b-chat/vx-xxx/checkpoint-xxx'
lora_request = LoRARequest('default-lora', 1, lora_checkpoint)
model_type = ModelType.llama2_7b_chat
llm_engine = get_vllm_engine(model_type, torch.float16, enable_lora=True,
max_loras=1, max_lora_rank=16)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
# Interface similar to `transformers.GenerationConfig`
llm_engine.generation_config.max_new_tokens = 256
# using lora
request_list = [{'query': 'who are you?'}]
query = request_list[0]['query']
resp_list = inference_vllm(llm_engine, template, request_list, lora_request=lora_request)
response = resp_list[0]['response']
print(f'query: {query}')
print(f'response: {response}')
# without lora
gen = inference_stream_vllm(llm_engine, template, request_list)
query = request_list[0]['query']
print(f'query: {query}\nresponse: ', end='')
print_idx = 0
for resp_list in gen:
response = resp_list[0]['response']
print(response[print_idx:], end='', flush=True)
print_idx = len(response)
print()
"""
query: who are you?
response: I am an artificial intelligence language model developed by ModelScope. I can understand and respond to text-based questions and prompts, and provide information and assistance on a wide range of topics.
query: who are you?
response: Hello! I'm just an AI assistant, here to help you with any questions or tasks you may have. I'm designed to be helpful, respectful, and honest in my responses, and I strive to provide socially unbiased and positive answers. I'm not a human, but a machine learning model trained on a large dataset of text to generate responses to a wide range of questions and prompts. I'm here to help you in any way I can, while always ensuring that my answers are safe and respectful. Is there anything specific you'd like to know or discuss?
"""
```
### Deployment
**Server**:
```shell
CUDA_VISIBLE_DEVICES=0 swift deploy \
--ckpt_dir output/llama2-7b-chat/vx-xxx/checkpoint-xxx \
--infer_backend vllm \
--vllm_enable_lora true
```
**Client**:
```bash
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "default-lora",
"messages": [{"role": "user", "content": "who are you?"}],
"max_tokens": 256,
"temperature": 0
}'
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "llama2-7b-chat",
"messages": [{"role": "user", "content": "who are you?"}],
"max_tokens": 256,
"temperature": 0
}'
```
Output:
```python
"""
{"model":"default-lora","choices":[{"index":0,"message":{"role":"assistant","content":"I am an artificial intelligence language model developed by ModelScope. I am designed to assist and communicate with users in a helpful, respectful, and honest manner. I can answer questions, provide information, and engage in conversation. How can I assist you?"},"finish_reason":"stop"}],"usage":{"prompt_tokens":141,"completion_tokens":53,"total_tokens":194},"id":"chatcmpl-fb95932dcdab4ce68f4be49c9946b306","object":"chat.completion","created":1710820459}
{"model":"llama2-7b-chat","choices":[{"index":0,"message":{"role":"assistant","content":" Hello! I'm just an AI assistant, here to help you with any questions or concerns you may have. I'm designed to provide helpful, respectful, and honest responses, while ensuring that my answers are socially unbiased and positive in nature. I'm not capable of providing harmful, unethical, racist, sexist, toxic, dangerous, or illegal content, and I will always do my best to explain why I cannot answer a question if it does not make sense or is not factually coherent. If I don't know the answer to a question, I will not provide false information. My goal is to assist and provide accurate information to the best of my abilities. Is there anything else I can help you with?"},"finish_reason":"stop"}],"usage":{"prompt_tokens":141,"completion_tokens":163,"total_tokens":304},"id":"chatcmpl-d867a3a52bb7451588d4f73e1df4ba95","object":"chat.completion","created":1710820557}
"""
```
With openai:
```python
from openai import OpenAI
client = OpenAI(
api_key='EMPTY',
base_url='http://localhost:8000/v1',
)
model_type_list = [model.id for model in client.models.list().data]
print(f'model_type_list: {model_type_list}')
query = 'who are you?'
messages = [{
'role': 'user',
'content': query
}]
resp = client.chat.completions.create(
model='default-lora',
messages=messages,
seed=42)
response = resp.choices[0].message.content
print(f'query: {query}')
print(f'response: {response}')
# stream
stream_resp = client.chat.completions.create(
model='llama2-7b-chat',
messages=messages,
stream=True,
seed=42)
print(f'query: {query}')
print('response: ', end='')
for chunk in stream_resp:
print(chunk.choices[0].delta.content, end='', flush=True)
print()
"""Out[0]
model_type_list: ['llama2-7b-chat', 'default-lora']
query: who are you?
response: I am an artificial intelligence language model developed by ModelScope. I am designed to assist and communicate with users in a helpful, respectful, and honest manner. I can answer questions, provide information, and engage in conversation. How can I assist you?
query: who are you?
response: Hello! I'm just an AI assistant, here to help you with any questions or concerns you may have. I'm designed to provide helpful, respectful, and honest responses, while ensuring that my answers are socially unbiased and positive in nature. I'm not capable of providing harmful, unethical, racist, sexist, toxic, dangerous, or illegal content, and I will always do my best to explain why I cannot answer a question if it does not make sense or is not factually coherent. If I don't know the answer to a question, I will not provide false information. Is there anything else I can help you with?
"""
```
| swift/docs/source_en/LLM/VLLM-inference-acceleration-and-deployment.md/0 | {
"file_path": "swift/docs/source_en/LLM/VLLM-inference-acceleration-and-deployment.md",
"repo_id": "swift",
"token_count": 9509
} | 197 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from swift.llm import rome_main
if __name__ == '__main__':
rome_main()
| swift/examples/pytorch/llm/rome_infer.py/0 | {
"file_path": "swift/examples/pytorch/llm/rome_infer.py",
"repo_id": "swift",
"token_count": 47
} | 198 |
# Experiment env: A10, RTX3090/4090, A100
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/codeqwen1half-7b-chat-awq/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--use_flash_attn false \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--stream false \
--merge_lora false \
| swift/examples/pytorch/llm/scripts/codeqwen1half_7b_chat_awq/lora/infer.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/codeqwen1half_7b_chat_awq/lora/infer.sh",
"repo_id": "swift",
"token_count": 179
} | 199 |
# Experimental environment: 2 * 3090
# 2 * 23GB GPU memory
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0,1 \
python llm_sft.py \
--model_id_or_path modelscope/Llama-2-70b-chat-ms \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--dataset sql-create-context-en \
--train_dataset_sample 20000 \
--num_train_epochs 1 \
--max_length 2048 \
--check_dataset_strategy warning \
--quantization_bit 4 \
--bnb_4bit_comp_dtype AUTO \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules q_proj v_proj \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
| swift/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_mp/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_mp/sft.sh",
"repo_id": "swift",
"token_count": 437
} | 200 |
# Experimental environment: 2 * 3090
# 2 * 23GB GPU memory
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0,1 \
python llm_sft.py \
--model_id_or_path OpenBuddy/openbuddy-llama2-70b-v10.1-bf16 \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--dataset blossom-math-zh \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 2048 \
--check_dataset_strategy warning \
--quantization_bit 4 \
--bnb_4bit_comp_dtype AUTO \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules q_proj v_proj \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
| swift/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_mp/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_mp/sft.sh",
"repo_id": "swift",
"token_count": 442
} | 201 |
# Experimental environment: A100
# 2*40GB GPU memory
CUDA_VISIBLE_DEVICES=0,1 \
swift sft \
--model_type qwen1half-32b-chat \
--sft_type lora \
--tuner_backend peft \
--dtype AUTO \
--output_dir output \
--dataset alpaca-zh alpaca-en \
--train_dataset_sample 5000 \
--num_train_epochs 2 \
--max_length 2048 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules DEFAULT \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--use_flash_attn true \
| swift/examples/pytorch/llm/scripts/qwen1half_32b_chat/lora_mp/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/qwen1half_32b_chat/lora_mp/sft.sh",
"repo_id": "swift",
"token_count": 368
} | 202 |
# Experimental environment: A100
# 42GB GPU memory
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_sft.py \
--model_type qwen1half-moe-a2_7b-chat \
--sft_type lora \
--tuner_backend peft \
--dtype AUTO \
--output_dir output \
--dataset blossom-math-zh \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 1024 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--use_flash_attn true \
| swift/examples/pytorch/llm/scripts/qwen1half_moe_a2_7b_chat/lora/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/qwen1half_moe_a2_7b_chat/lora/sft.sh",
"repo_id": "swift",
"token_count": 381
} | 203 |
# Experimental environment: A10, 3090
# 16GB GPU memory
# Recommended to use `qwen_14b_chat_int4`
CUDA_VISIBLE_DEVICES=0 \
swift sft \
--model_id_or_path qwen/Qwen-14B-Chat \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--dataset blossom-math-zh \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 2048 \
--check_dataset_strategy warning \
--quantization_bit 4 \
--bnb_4bit_comp_dtype AUTO \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--use_flash_attn false \
| swift/examples/pytorch/llm/scripts/qwen_14b_chat/qlora/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/qwen_14b_chat/qlora/sft.sh",
"repo_id": "swift",
"token_count": 435
} | 204 |
# Experimental environment: A100
# 80GB GPU memory
CUDA_VISIBLE_DEVICES=0 \
swift sft \
--model_type qwen-7b-chat \
--sft_type full \
--train_dataset_sample -1 \
--eval_steps 100 \
--output_dir output \
--num_train_epochs 1 \
--max_length 4096 \
--learning_rate 1e-5 \
--use_flash_attn true \
--save_only_model true \
--dataset codefuse-evol-instruction-zh \
--preprocess_num_proc 4 \
| swift/examples/pytorch/llm/scripts/qwen_7b_chat/full/sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/qwen_7b_chat/full/sft.sh",
"repo_id": "swift",
"token_count": 188
} | 205 |
# Experimental environment: 2 * A100
# 80GB GPU memory
# Note: TorchAcc is currently only available internally.
export USE_TORCHACC=1
export TORCHACC_TRIM_GRAPH=1
export XLA_IR_SHAPE_CACHE_SIZE=100000000
export XLA_ALLOCATOR_FRACTION=0.95
export XLA_EXPERIMENTAL=nonzero:masked_select
export XLA_PERSISTENT_CACHE_PATH=./output/compiled_cache/Llama-2-13b-chat-ms
mkdir -p $XLA_PERSISTENT_CACHE_PATH
NPROC_PER_NODE=2 \
CUDA_VISIBLE_DEVICES=0,1 \
swift sft \
--model_id_or_path modelscope/Llama-2-13b-chat-ms \
--model_layer_cls_name LlamaDecoderLayer \
--dataset codefuse-python-en \
--template_type llama \
--sft_type lora \
--output_dir output \
--num_train_epochs 1 \
--max_length 2048 \
--batch_size 14 \
--use_flash_attn true \
--gradient_accumulation_steps 1 \
--gradient_checkpointing no \
--tuner_backend 'peft' \
--dataset_test_ratio 0 \
--save_strategy no \
--eval_steps 2000000 \
--save_steps 2000000 \
--logging_steps 100 \
--acc_steps 100 \
--preprocess_num_proc 1 \
--metric_warmup_step 0.1 \
--report_to 'none'
| swift/examples/pytorch/llm/scripts/torchacc/llama2_13b_chat/acc_lora_dp_sft.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/torchacc/llama2_13b_chat/acc_lora_dp_sft.sh",
"repo_id": "swift",
"token_count": 438
} | 206 |
# Experimental environment: 3090
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_infer.py \
--ckpt_dir "output/xverse-13b/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.7 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false \
| swift/examples/pytorch/llm/scripts/xverse_13b/qlora/infer.sh/0 | {
"file_path": "swift/examples/pytorch/llm/scripts/xverse_13b/qlora/infer.sh",
"repo_id": "swift",
"token_count": 156
} | 207 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from swift.aigc import infer_text_to_image_sdxl
if __name__ == '__main__':
infer_text_to_image_sdxl()
| swift/examples/pytorch/sdxl/infer_text_to_image_sdxl.py/0 | {
"file_path": "swift/examples/pytorch/sdxl/infer_text_to_image_sdxl.py",
"repo_id": "swift",
"token_count": 61
} | 208 |
PYTHONPATH=../../../ \
accelerate launch train_text_to_image_lora.py \
--pretrained_model_name_or_path="AI-ModelScope/stable-diffusion-v1-5" \
--dataset_name="AI-ModelScope/pokemon-blip-captions" \
--caption_column="text" \
--resolution=512 \
--random_flip \
--train_batch_size=1 \
--num_train_epochs=100 \
--checkpointing_steps=5000 \
--learning_rate=1e-04 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--mixed_precision="fp16" \
--seed=42 \
--output_dir="train_text_to_image_lora" \
--validation_prompt="cute dragon creature" \
--report_to="tensorboard" \
| swift/examples/pytorch/sdxl/scripts/run_train_text_to_image_lora.sh/0 | {
"file_path": "swift/examples/pytorch/sdxl/scripts/run_train_text_to_image_lora.sh",
"repo_id": "swift",
"token_count": 247
} | 209 |
PYTHONPATH=. torchrun examples/pytorch/stable_diffusion/finetune_stable_diffusion.py \
--model 'AI-ModelScope/stable-diffusion-v2-1' \
--model_revision 'v1.0.1' \
--prompt "a dog" \
--work_dir './tmp/lora_diffusion' \
--train_dataset_name 'buptwq/lora-stable-diffusion-finetune' \
--max_epochs 200 \
--lora_rank 16 \
--lora_alpha 24 \
--save_ckpt_strategy 'by_epoch' \
--logging_interval 1 \
--train.dataloader.workers_per_gpu 0 \
--evaluation.dataloader.workers_per_gpu 0 \
--train.optimizer.lr 1e-4 \
--sample_nums 10 \
--num_inference_steps 30 \
--use_model_config true
| swift/examples/pytorch/stable_diffusion/run_train_lora.sh/0 | {
"file_path": "swift/examples/pytorch/stable_diffusion/run_train_lora.sh",
"repo_id": "swift",
"token_count": 290
} | 210 |
{"lora_dtype": null, "lorap_lr_ratio": null, "lorap_emb_lr": 1e-06} | swift/output/qwen-7b-chat/v1-20240626-092716/checkpoint-12/additional_config.json/0 | {
"file_path": "swift/output/qwen-7b-chat/v1-20240626-092716/checkpoint-12/additional_config.json",
"repo_id": "swift",
"token_count": 33
} | 211 |
import os
import re
import torch
from modelscope import snapshot_download
from swift.llm import MODEL_MAPPING
def test_readme():
for model_type in MODEL_MAPPING.keys():
model_id = MODEL_MAPPING[model_type]['model_id_or_path']
model_dir = snapshot_download(model_id, revision='master')
readme_path = os.path.join(model_dir, 'README.md')
assert os.path.exists(readme_path)
with open(readme_path, 'r') as f:
text = f.read()
code_list = re.findall(r'```python\n(.+?)\n```', text, re.M | re.S)
print(f'model_type: {model_type}')
for code in code_list:
if 'import' not in code or 'modelscope' not in code:
continue
try:
exec(code)
except Exception:
print(code)
                input('[ENTER]')
torch.cuda.empty_cache()
if __name__ == '__main__':
test_readme()
| swift/scripts/tests/test_readme.py/0 | {
"file_path": "swift/scripts/tests/test_readme.py",
"repo_id": "swift",
"token_count": 459
} | 212 |
from .infer_controlnet import main as infer_controlnet
from .infer_controlnet_sdxl import main as infer_controlnet_sdxl
from .infer_dreambooth import main as infer_dreambooth
from .infer_dreambooth_lora import main as infer_dreambooth_lora
from .infer_dreambooth_lora_sdxl import main as infer_dreambooth_lora_sdxl
from .infer_text_to_image import main as infer_text_to_image
from .infer_text_to_image_lora import main as infer_text_to_image_lora
from .infer_text_to_image_lora_sdxl import main as infer_text_to_image_lora_sdxl
from .infer_text_to_image_sdxl import main as infer_text_to_image_sdxl
from .train_controlnet import main as train_controlnet
from .train_controlnet_sdxl import main as train_controlnet_sdxl
from .train_dreambooth import main as train_dreambooth
from .train_dreambooth_lora import main as train_dreambooth_lora
from .train_dreambooth_lora_sdxl import main as train_dreambooth_lora_sdxl
from .train_text_to_image import main as train_text_to_image
from .train_text_to_image_lora import main as train_text_to_image_lora
from .train_text_to_image_lora_sdxl import main as train_text_to_image_lora_sdxl
from .train_text_to_image_sdxl import main as train_text_to_image_sdxl
| swift/swift/aigc/diffusers/__init__.py/0 | {
"file_path": "swift/swift/aigc/diffusers/__init__.py",
"repo_id": "swift",
"token_count": 433
} | 213 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from swift.llm import rlhf_main
if __name__ == '__main__':
rlhf_main()
| swift/swift/cli/rlhf.py/0 | {
"file_path": "swift/swift/cli/rlhf.py",
"repo_id": "swift",
"token_count": 50
} | 214 |
# Copyright (c) Alibaba, Inc. and its affiliates.
import os
from pathlib import Path
MODELSCOPE_URL_SCHEME = 'http://'
DEFAULT_MODELSCOPE_DOMAIN = 'www.modelscope.cn'
DEFAULT_MODELSCOPE_DATA_ENDPOINT = MODELSCOPE_URL_SCHEME + DEFAULT_MODELSCOPE_DOMAIN
MODELSCOPE_PARALLEL_DOWNLOAD_THRESHOLD_MB = int(os.environ.get('MODELSCOPE_PARALLEL_DOWNLOAD_THRESHOLD_MB', 500))
MODELSCOPE_DOWNLOAD_PARALLELS = int(os.environ.get('MODELSCOPE_DOWNLOAD_PARALLELS', 4))
DEFAULT_MODELSCOPE_GROUP = 'damo'
MODEL_ID_SEPARATOR = '/'
FILE_HASH = 'Sha256'
LOGGER_NAME = 'ModelScopeHub'
DEFAULT_CREDENTIALS_PATH = Path.home().joinpath('.modelscope', 'credentials')
REQUESTS_API_HTTP_METHOD = ['get', 'head', 'post', 'put', 'patch', 'delete']
API_HTTP_CLIENT_TIMEOUT = 5
API_RESPONSE_FIELD_DATA = 'Data'
API_FILE_DOWNLOAD_RETRY_TIMES = 5
API_FILE_DOWNLOAD_TIMEOUT = 60 * 5
API_FILE_DOWNLOAD_CHUNK_SIZE = 1024 * 1024 * 16
API_RESPONSE_FIELD_GIT_ACCESS_TOKEN = 'AccessToken'
API_RESPONSE_FIELD_USERNAME = 'Username'
API_RESPONSE_FIELD_EMAIL = 'Email'
API_RESPONSE_FIELD_MESSAGE = 'Message'
MODELSCOPE_CLOUD_ENVIRONMENT = 'MODELSCOPE_ENVIRONMENT'
MODELSCOPE_CLOUD_USERNAME = 'MODELSCOPE_USERNAME'
MODELSCOPE_SDK_DEBUG = 'MODELSCOPE_SDK_DEBUG'
ONE_YEAR_SECONDS = 24 * 365 * 60 * 60
MODEL_META_FILE_NAME = '.mdl'
MODEL_META_MODEL_ID = 'id'
DEFAULT_MODEL_REVISION = None
MASTER_MODEL_BRANCH = 'master'
DEFAULT_REPOSITORY_REVISION = 'master'
DEFAULT_DATASET_REVISION = 'master'
DEFAULT_DATASET_NAMESPACE = 'modelscope'
DEFAULT_DATA_ACCELERATION_ENDPOINT = 'https://oss-accelerate.aliyuncs.com'
class Licenses(object):
APACHE_V2 = 'Apache License 2.0'
GPL_V2 = 'GPL-2.0'
GPL_V3 = 'GPL-3.0'
LGPL_V2_1 = 'LGPL-2.1'
LGPL_V3 = 'LGPL-3.0'
AFL_V3 = 'AFL-3.0'
ECL_V2 = 'ECL-2.0'
MIT = 'MIT'
class ModelVisibility(object):
PRIVATE = 1
INTERNAL = 3
PUBLIC = 5
| swift/swift/hub/constants.py/0 | {
"file_path": "swift/swift/hub/constants.py",
"repo_id": "swift",
"token_count": 850
} | 215 |
# Copyright (c) Alibaba, Inc. and its affiliates.
import inspect
import logging
import time
from dataclasses import asdict
from http import HTTPStatus
from typing import List, Optional, Union
import json
import torch
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from modelscope import GenerationConfig
from packaging import version
from peft import PeftModel
from swift.utils import get_logger, get_main, seed_everything
from .agent import split_action_action_input
from .infer import merge_lora, prepare_model_template
from .utils import (TEMPLATE_MAPPING, ChatCompletionMessageToolCall, ChatCompletionRequest, ChatCompletionResponse,
ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse,
ChatMessage, CompletionRequest, CompletionResponse, CompletionResponseChoice,
CompletionResponseStreamChoice, CompletionStreamResponse, DeltaMessage, DeployArguments, Function,
Model, ModelList, Template, UsageInfo, decode_base64, inference, inference_stream,
messages_join_observation, messages_to_history, random_uuid)
logger = get_logger()
app = FastAPI()
_args: Optional[DeployArguments] = None
model = None
llm_engine = None
template: Optional[Template] = None
def create_error_response(status_code: Union[int, str, HTTPStatus], message: str) -> JSONResponse:
status_code = int(status_code)
return JSONResponse({'message': message, 'object': 'error'}, status_code)
@app.get('/v1/models')
async def get_available_models():
global _args
model_list = [_args.model_type]
if _args.lora_request_list is not None:
model_list += [lora_request.lora_name for lora_request in _args.lora_request_list]
data = [
Model(
id=model_id,
is_chat=not is_generation_template(_args.template_type),
is_multimodal=_args.is_multimodal,
owned_by=_args.owned_by) for model_id in model_list
]
return ModelList(data=data)
async def check_length(request: Union[ChatCompletionRequest, CompletionRequest], input_ids: List[int]) -> Optional[str]:
global llm_engine, model, _args
if _args.infer_backend == 'vllm':
max_model_len = llm_engine.model_config.max_model_len
else:
max_model_len = model.max_model_len
num_tokens = len(input_ids)
max_tokens = request.max_tokens
if max_model_len is None:
max_model_len = 8192
logger.warning(
'The current model is unable to retrieve `max_model_len`. It is set to the default value of 8192.')
if max_tokens is None:
max_tokens = max_model_len - num_tokens
request.max_tokens = max_tokens
if max_tokens + num_tokens > max_model_len:
error_msg = (f'Your prompt has {num_tokens} tokens, and you have set the `max_tokens` to {max_tokens}, '
f'but the maximum model length supported is {max_model_len}. '
'Please reduce the number of tokens in the prompt or the `max_tokens`.')
return error_msg
async def check_model(request: Union[ChatCompletionRequest, CompletionRequest]) -> Optional[str]:
model_list = await get_available_models()
model_type_list = [model.id for model in model_list.data]
if request.model in model_type_list:
return
else:
return f'`{request.model}` is not in the model_list: `{model_type_list}`.'
def is_generation_template(template_type: str) -> bool:
template_info = TEMPLATE_MAPPING[template_type]
is_generation = template_info.get('is_generation', False)
return is_generation
@torch.inference_mode()
async def inference_vllm_async(request: Union[ChatCompletionRequest, CompletionRequest], raw_request: Request):
global llm_engine, template, _args
from .utils import VllmGenerationConfig
error_msg = await check_model(request)
if error_msg is not None:
return create_error_response(HTTPStatus.BAD_REQUEST, error_msg)
if request.seed is not None:
seed_everything(request.seed, verbose=False)
_request = {'model': request.model}
if isinstance(request, ChatCompletionRequest):
if is_generation_template(template.template_type):
return create_error_response(
HTTPStatus.BAD_REQUEST, f'The chat template `{template.template_type}` corresponding to '
f'the model `{llm_engine.model_type}` is in text generation format. '
'Please use the `completions` API.')
        # For agents, check whether the response ends with 'Observation:' and join the tool observation
messages_join_observation(request.messages)
example = messages_to_history(request.messages)
# tool choice
if request.tool_choice is not None and request.tools is not None:
if isinstance(request.tool_choice, dict):
name = request.tool_choice['function']['name']
tool = next((t for t in request.tools if t['function']['name'] == name), None)
if tool is None:
raise ValueError(f"Tool choice '{name}' not found in tools.")
example['tools'] = [tool]
elif request.tool_choice == 'auto':
example['tools'] = request.tools
input_ids = template.encode(example)[0]['input_ids']
request_id = f'chatcmpl-{random_uuid()}'
_request['messages'] = request.messages
else:
if not is_generation_template(template.template_type):
return create_error_response(
HTTPStatus.BAD_REQUEST, f'The chat template `{template.template_type}` corresponding to '
f'the model `{llm_engine.model_type}` is in chat format. '
'Please use the `chat.completions` API.')
example = {'query': request.prompt}
input_ids = template.encode(example)[0]['input_ids']
request_id = f'cmpl-{random_uuid()}'
_request['prompt'] = request.prompt
request_info = {'request_id': request_id}
request_info.update(_request)
error_msg = await check_length(request, input_ids)
if error_msg is not None:
return create_error_response(HTTPStatus.BAD_REQUEST, error_msg)
kwargs = {'max_new_tokens': request.max_tokens}
for key in ['n', 'stop', 'best_of', 'frequency_penalty', 'length_penalty', 'presence_penalty', 'num_beams']:
kwargs[key] = getattr(request, key)
for key in ['temperature', 'top_k', 'top_p', 'repetition_penalty']:
new_value = getattr(request, key)
if new_value is None:
kwargs[key] = getattr(llm_engine.generation_config, key)
else:
kwargs[key] = new_value
generation_config = VllmGenerationConfig(**kwargs)
if generation_config.use_beam_search is True and request.stream is True:
error_msg = 'Streaming generation does not support beam search.'
raise ValueError(error_msg)
tokenizer = template.tokenizer
if tokenizer.eos_token is not None and tokenizer.eos_token not in generation_config.stop:
generation_config.stop.append(tokenizer.eos_token)
if isinstance(template.suffix[-1], str) and template.suffix[-1] not in generation_config.stop:
generation_config.stop.append(template.suffix[-1])
if isinstance(template.suffix[-1], list):
token_str = tokenizer.decode(template.suffix[-1])
if token_str not in generation_config.stop:
generation_config.stop.append(token_str)
request_info['generation_config'] = generation_config
request_info.update({'seed': request.seed, 'stream': request.stream})
logger.info(request_info)
created_time = int(time.time())
generate_kwargs = {}
if _args.vllm_enable_lora and request.model != _args.model_type:
lora_request = None
for lora_req in _args.lora_request_list:
if lora_req.lora_name == request.model:
lora_request = lora_req
break
assert lora_request is not None
generate_kwargs['lora_request'] = lora_request
import vllm
if version.parse(vllm.__version__) >= version.parse('0.4.3'):
result_generator = llm_engine.generate({'prompt_token_ids': input_ids}, generation_config, request_id,
**generate_kwargs)
else:
result_generator = llm_engine.generate(None, generation_config, request_id, input_ids, **generate_kwargs)
async def _generate_full():
result = None
async for result in result_generator:
if await raw_request.is_disconnected():
await llm_engine.abort(request_id)
return create_error_response(HTTPStatus.BAD_REQUEST, 'Client disconnected')
assert result is not None
num_prompt_tokens = len(result.prompt_token_ids)
num_generated_tokens = sum(len(output.token_ids) for output in result.outputs)
usage_info = UsageInfo(
prompt_tokens=num_prompt_tokens,
completion_tokens=num_generated_tokens,
total_tokens=num_prompt_tokens + num_generated_tokens,
)
if isinstance(request, ChatCompletionRequest):
choices = []
for output in result.outputs:
response = template.generate_ids_to_response(output.token_ids)
action, action_input = split_action_action_input(response)
toolcall = None
if action is not None:
toolcall = ChatCompletionMessageToolCall(
id=f'toolcall-{random_uuid()}',
type='function',
function=Function(name=action, arguments=action_input))
choice = ChatCompletionResponseChoice(
index=output.index,
message=ChatMessage(role='assistant', content=response, tool_calls=toolcall),
finish_reason=output.finish_reason,
)
choices.append(choice)
response = ChatCompletionResponse(
model=request.model, choices=choices, usage=usage_info, id=request_id, created=created_time)
else:
choices = []
for output in result.outputs:
response = template.generate_ids_to_response(output.token_ids)
choice = CompletionResponseChoice(
index=output.index,
text=response,
finish_reason=output.finish_reason,
)
choices.append(choice)
response = CompletionResponse(
model=request.model, choices=choices, usage=usage_info, id=request_id, created=created_time)
return response
async def _generate_stream():
print_idx_list = [[0] for _ in range(request.n)]
total_res = ['' for _ in range(request.n)]
async for result in result_generator:
num_prompt_tokens = len(result.prompt_token_ids)
num_generated_tokens = sum(len(output.token_ids) for output in result.outputs)
usage_info = UsageInfo(
prompt_tokens=num_prompt_tokens,
completion_tokens=num_generated_tokens,
total_tokens=num_prompt_tokens + num_generated_tokens,
)
for output in result.outputs:
output.delta_text = template.generate_ids_to_response(
output.token_ids, output.finished(), return_delta=True, print_idx=print_idx_list[output.index])
total_res[output.index] += output.delta_text
if isinstance(request, ChatCompletionRequest):
choices = []
for output in result.outputs:
toolcall = None
if output.finish_reason is not None:
action, action_input = split_action_action_input(total_res[output.index])
if action is not None:
toolcall = ChatCompletionMessageToolCall(
id=f'toolcall-{random_uuid()}',
type='function',
function=Function(name=action, arguments=action_input))
choice = ChatCompletionResponseStreamChoice(
index=output.index,
delta=DeltaMessage(role='assistant', content=output.delta_text, tool_calls=toolcall),
finish_reason=output.finish_reason)
choices.append(choice)
response = ChatCompletionStreamResponse(
model=request.model, choices=choices, usage=usage_info, id=request_id, created=created_time)
else:
choices = []
for output in result.outputs:
choice = CompletionResponseStreamChoice(
index=output.index, text=output.delta_text, finish_reason=output.finish_reason)
choices.append(choice)
response = CompletionStreamResponse(
model=request.model, choices=choices, usage=usage_info, id=request_id, created=created_time)
yield f'data:{json.dumps(asdict(response), ensure_ascii=False)}\n\n'
yield 'data:[DONE]\n\n'
if request.stream:
return StreamingResponse(_generate_stream())
else:
return await _generate_full()
class _GenerationConfig(GenerationConfig):
def __repr__(self) -> str:
parameters = inspect.signature(self.to_json_string).parameters
kwargs = {}
if 'ignore_metadata' in parameters:
kwargs['ignore_metadata'] = True
gen_kwargs = json.loads(self.to_json_string(**kwargs))
gen_kwargs.pop('transformers_version', None)
return f'GenerationConfig({gen_kwargs})'
@torch.inference_mode()
async def inference_pt_async(request: Union[ChatCompletionRequest, CompletionRequest], raw_request: Request):
global model, template
error_msg = await check_model(request)
if error_msg is not None:
return create_error_response(HTTPStatus.BAD_REQUEST, error_msg)
if request.seed is not None:
seed_everything(request.seed, verbose=False)
_request = {'model': request.model}
if isinstance(request, ChatCompletionRequest):
if is_generation_template(template.template_type):
return create_error_response(
HTTPStatus.BAD_REQUEST, f'The chat template `{template.template_type}` corresponding to '
f'the model `{model.model_type}` is in text generation format. '
'Please use the `completions` API.')
messages = request.messages
        # For agents, check whether the response ends with 'Observation:' and join the tool observation
messages_join_observation(messages)
images = request.images
if _args.is_multimodal:
messages = decode_base64(messages=messages)['messages']
images = decode_base64(images=images)['images']
example = messages_to_history(messages)
if len(images) > 0:
example['images'] = images
if request.tool_choice is not None and request.tools is not None:
if isinstance(request.tool_choice, dict):
name = request.tool_choice['function']['name']
tool = next((t for t in request.tools if t['function']['name'] == name), None)
if tool is None:
raise ValueError(f"Tool choice '{name}' not found in tools.")
example['tools'] = [tool]
elif request.tool_choice == 'auto':
example['tools'] = request.tools
input_ids = template.encode(example)[0]['input_ids']
request_id = f'chatcmpl-{random_uuid()}'
_request['messages'] = messages
else:
if not is_generation_template(template.template_type):
return create_error_response(
HTTPStatus.BAD_REQUEST, f'The chat template `{template.template_type}` corresponding to '
f'the model `{model.model_type}` is in chat format. '
'Please use the `chat.completions` API.')
prompt = request.prompt
images = request.images
if _args.is_multimodal:
prompt = decode_base64(prompt=prompt)['prompt']
images = decode_base64(images=images)['images']
example = {'query': prompt}
if len(images) > 0:
example['images'] = images
input_ids = template.encode(example)[0]['input_ids']
request_id = f'cmpl-{random_uuid()}'
_request['prompt'] = prompt
request_info = {'request_id': request_id}
request_info.update(_request)
error_msg = await check_length(request, input_ids)
if error_msg is not None:
return create_error_response(HTTPStatus.BAD_REQUEST, error_msg)
kwargs = {'max_new_tokens': request.max_tokens}
    # not used: 'n', 'best_of', 'frequency_penalty', 'presence_penalty'
for key in ['length_penalty', 'num_beams']:
kwargs[key] = getattr(request, key)
for key in ['temperature', 'top_k', 'top_p', 'repetition_penalty']:
new_value = getattr(request, key)
if new_value is None:
kwargs[key] = getattr(model.generation_config, key)
else:
kwargs[key] = new_value
if kwargs['temperature'] == 0:
kwargs['do_sample'] = False
kwargs['temperature'] = 1
kwargs['top_p'] = 1
kwargs['top_k'] = 50
else:
kwargs['do_sample'] = True
generation_config = _GenerationConfig(**kwargs)
request_info['generation_config'] = generation_config
request_info.update({'seed': request.seed, 'stop': request.stop, 'stream': request.stream})
logger.info(request_info)
created_time = int(time.time())
adapter_kwargs = {}
if request.model != _args.model_type:
adapter_names = None
for lora_req in _args.lora_request_list:
if lora_req.lora_name == request.model:
adapter_names = request.model
break
assert adapter_names is not None
adapter_kwargs['adapter_names'] = [adapter_names]
elif isinstance(model, PeftModel):
adapter_kwargs['adapter_names'] = ['-']
async def _generate_full():
generation_info = {}
response, _ = inference(
model,
template,
**example,
stop_words=request.stop,
generation_config=generation_config,
generation_info=generation_info,
**adapter_kwargs)
num_prompt_tokens = generation_info['num_prompt_tokens']
num_generated_tokens = generation_info['num_generated_tokens']
usage_info = UsageInfo(
prompt_tokens=num_prompt_tokens,
completion_tokens=num_generated_tokens,
total_tokens=num_prompt_tokens + num_generated_tokens,
)
if isinstance(request, ChatCompletionRequest):
action, action_input = split_action_action_input(response)
toolcall = None
if action is not None:
toolcall = ChatCompletionMessageToolCall(
id=f'toolcall-{random_uuid()}',
type='function',
function=Function(name=action, arguments=action_input))
choices = [
ChatCompletionResponseChoice(
index=0,
message=ChatMessage(role='assistant', content=response, tool_calls=toolcall),
finish_reason=None,
)
]
response = ChatCompletionResponse(
model=request.model, choices=choices, usage=usage_info, id=request_id, created=created_time)
else:
choices = [CompletionResponseChoice(
index=0,
text=response,
finish_reason=None,
)]
response = CompletionResponse(
model=request.model, choices=choices, usage=usage_info, id=request_id, created=created_time)
return response
def _generate_stream():
generation_info = {}
gen = inference_stream(
model,
template,
**example,
stop_words=request.stop,
generation_config=generation_config,
generation_info=generation_info,
**adapter_kwargs)
print_idx = 0
response = ''
is_finished = False
while not is_finished:
try:
response, _ = next(gen)
except StopIteration:
is_finished = True
num_prompt_tokens = generation_info['num_prompt_tokens']
num_generated_tokens = generation_info['num_generated_tokens']
usage_info = UsageInfo(
prompt_tokens=num_prompt_tokens,
completion_tokens=num_generated_tokens,
total_tokens=num_prompt_tokens + num_generated_tokens,
)
if isinstance(request, ChatCompletionRequest):
delta_text = response[print_idx:]
print_idx = len(response)
toolcall = None
if is_finished:
action, action_input = split_action_action_input(response)
if action:
toolcall = ChatCompletionMessageToolCall(
id=f'toolcall-{random_uuid()}',
type='function',
function=Function(name=action, arguments=action_input))
choices = [
ChatCompletionResponseStreamChoice(
index=0,
delta=DeltaMessage(role='assistant', content=delta_text, tool_calls=toolcall),
finish_reason=None)
]
resp = ChatCompletionStreamResponse(
model=request.model, choices=choices, usage=usage_info, id=request_id, created=created_time)
else:
delta_text = response[print_idx:]
print_idx = len(response)
choices = [CompletionResponseStreamChoice(index=0, text=delta_text, finish_reason=None)]
resp = CompletionStreamResponse(
model=request.model, choices=choices, usage=usage_info, id=request_id, created=created_time)
yield f'data:{json.dumps(asdict(resp), ensure_ascii=False)}\n\n'
yield 'data:[DONE]\n\n'
if request.stream:
return StreamingResponse(_generate_stream())
else:
return await _generate_full()
@app.post('/v1/chat/completions')
async def create_chat_completion(request: ChatCompletionRequest, raw_request: Request) -> ChatCompletionResponse:
global _args
assert _args is not None
if request.stop is None:
request.stop = []
if _args.infer_backend == 'vllm':
return await inference_vllm_async(request, raw_request)
else:
return await inference_pt_async(request, raw_request)
@app.post('/v1/completions')
async def create_completion(request: CompletionRequest, raw_request: Request) -> CompletionResponse:
global _args
assert _args is not None
if request.stop is None:
request.stop = []
if _args.infer_backend == 'vllm':
return await inference_vllm_async(request, raw_request)
else:
return await inference_pt_async(request, raw_request)
def llm_deploy(args: DeployArguments) -> None:
logger.info(f'args: {args}')
seed_everything(args.seed)
logger_format = logging.Formatter('%(levelname)s: %(asctime)s %(filename)s:%(lineno)d] %(message)s')
logger.handlers[0].setFormatter(logger_format)
import uvicorn
global llm_engine, model, template, _args
_args = args
if args.merge_lora:
merge_lora(args, device_map=args.merge_device_map)
if args.infer_backend == 'vllm':
from .utils import prepare_vllm_engine_template
llm_engine, template = prepare_vllm_engine_template(args, use_async=True)
else:
model, template = prepare_model_template(args)
uvicorn.run(app, host=args.host, port=args.port, ssl_keyfile=args.ssl_keyfile, ssl_certfile=args.ssl_certfile)
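# Typically launched via the CLI (see the deployment docs; the arguments below are illustrative):
#     CUDA_VISIBLE_DEVICES=0 swift deploy --ckpt_dir <ckpt_dir> --infer_backend vllm --vllm_enable_lora true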
deploy_main = get_main(DeployArguments, llm_deploy)
| swift/swift/llm/deploy.py/0 | {
"file_path": "swift/swift/llm/deploy.py",
"repo_id": "swift",
"token_count": 10942
} | 216 |
import asyncio
import inspect
import os
from copy import deepcopy
from typing import Any, Dict, Iterator, List, Optional, Tuple
import torch
import vllm
from modelscope import GenerationConfig
from packaging import version
from torch import dtype as Dtype
from tqdm import tqdm
from transformers import PreTrainedTokenizerBase
from vllm import AsyncEngineArgs, AsyncLLMEngine, EngineArgs, LLMEngine, SamplingParams
from swift.utils import get_logger
from .argument import InferArguments
from .model import get_model_tokenizer
from .template import Template, get_template
try:
from vllm.lora.request import LoRARequest
except ImportError:
pass
logger = get_logger()
def get_vllm_engine(
model_type: str,
torch_dtype: Optional[Dtype] = None,
*,
model_id_or_path: Optional[str] = None,
revision: Optional[str] = None,
gpu_memory_utilization: float = 0.9,
tensor_parallel_size: int = 1,
max_model_len: Optional[int] = None,
disable_custom_all_reduce: bool = True, # Default values different from vllm
enforce_eager: bool = False,
engine_kwargs: Optional[Dict[str, Any]] = None,
use_async: bool = False,
enable_lora: bool = False,
max_loras: int = 1,
max_lora_rank: int = 16,
**kwargs) -> LLMEngine:
model_dir = kwargs.pop('model_dir', None) # compat with swift<1.7
tokenizer = get_model_tokenizer(
model_type,
load_model=False,
model_id_or_path=model_id_or_path,
model_dir=model_dir,
revision=revision,
download_model=True)[1]
model_dir = tokenizer.model_dir
if engine_kwargs is None:
engine_kwargs = {}
dtype_mapping = {torch.float16: 'float16', torch.bfloat16: 'bfloat16', torch.float32: 'float32', None: 'auto'}
dtype = dtype_mapping[torch_dtype]
disable_log_stats = engine_kwargs.pop('disable_log_stats', True)
if use_async:
engine_args_cls = AsyncEngineArgs
llm_engine_cls = AsyncLLMEngine
engine_kwargs['disable_log_requests'] = True
else:
engine_args_cls = EngineArgs
llm_engine_cls = LLMEngine
parameters = inspect.signature(engine_args_cls.__init__).parameters
if 'enable_lora' in parameters and enable_lora:
engine_kwargs['enable_lora'] = enable_lora
engine_kwargs['max_loras'] = max_loras
engine_kwargs['max_lora_rank'] = max_lora_rank
else:
assert not enable_lora, 'The current version of VLLM does not support `enable_lora`. Please upgrade VLLM.'
engine_args = engine_args_cls(
model=model_dir,
trust_remote_code=True,
dtype=dtype,
gpu_memory_utilization=gpu_memory_utilization,
tensor_parallel_size=tensor_parallel_size,
max_model_len=max_model_len,
disable_log_stats=disable_log_stats,
disable_custom_all_reduce=disable_custom_all_reduce,
enforce_eager=enforce_eager,
**engine_kwargs)
try:
from vllm.model_executor.parallel_utils.parallel_state import destroy_model_parallel
destroy_model_parallel()
except ImportError:
pass
# fix HTTPError bug (use model_dir)
os.environ.pop('VLLM_USE_MODELSCOPE', None)
llm_engine = llm_engine_cls.from_engine_args(engine_args)
llm_engine.engine_args = engine_args
llm_engine.model_dir = model_dir
llm_engine.model_type = model_type
if use_async:
_engine = llm_engine.engine
else:
_engine = llm_engine
# compatible with vllm==0.3.*
if version.parse(vllm.__version__) >= version.parse('0.3'):
assert isinstance(_engine.tokenizer.tokenizer, PreTrainedTokenizerBase)
_engine.tokenizer.tokenizer = tokenizer
# fix vllm==0.4 bug (very slow)
if version.parse(vllm.__version__) >= version.parse('0.4'):
_tokenizer_len = len(tokenizer)
__old_len__ = tokenizer.__class__.__len__
def __len__(self) -> int:
if self is tokenizer:
return _tokenizer_len
else:
return __old_len__(self)
tokenizer.__class__.__len__ = __len__
else:
assert isinstance(_engine.tokenizer, PreTrainedTokenizerBase)
_engine.tokenizer = tokenizer
llm_engine.hf_tokenizer = tokenizer
generation_config_path = os.path.join(model_dir, 'generation_config.json')
if os.path.isfile(generation_config_path):
generation_config = GenerationConfig.from_pretrained(model_dir)
kwargs = generation_config.to_dict()
parameters = inspect.signature(VllmGenerationConfig.__init__).parameters
for k in kwargs.copy().keys():
if k not in parameters:
kwargs.pop(k)
llm_engine.generation_config = VllmGenerationConfig(**kwargs)
else:
llm_engine.generation_config = VllmGenerationConfig()
return llm_engine
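# A hedged usage sketch of the engine workflow (model/template types are illustrative;
# `get_template` is imported above from .template):
#     llm_engine = get_vllm_engine('qwen-7b-chat', torch.float16)
#     template = get_template('qwen', llm_engine.hf_tokenizer)
#     resp_list = inference_vllm(llm_engine, template, [{'query': 'hello!'}])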
class VllmGenerationConfig(SamplingParams):
def __init__(
self,
max_new_tokens: Optional[int] = 64, # max_tokens
temperature: float = 1.,
top_k: int = 50, # -1: all
top_p: float = 1.,
repetition_penalty: float = 1.,
num_beams: int = 1,
*,
n: int = 1,
length_penalty: float = 1.,
stop: Optional[List[str]] = None,
skip_special_tokens: bool = False,
**kwargs,
) -> None:
# The parameter design is similar to transformers.GenerationConfig.
if max_new_tokens is None:
max_new_tokens = 64
if num_beams > 1:
top_k = -1
top_p = 1
temperature = 0
logger.warning(
'The output of num_beams in vllm may not be consistent with the output of num_beams in transformers.')
if top_k == 0:
top_k = -1
if stop is None:
stop = []
kwargs['max_tokens'] = max_new_tokens
kwargs['temperature'] = temperature
kwargs['top_k'] = top_k
kwargs['top_p'] = top_p
kwargs['repetition_penalty'] = repetition_penalty
if num_beams > 1:
best_of = kwargs.get('best_of')
assert 'use_beam_search' not in kwargs and best_of is None
kwargs['use_beam_search'] = True
kwargs['best_of'] = num_beams
kwargs['n'] = n
kwargs['length_penalty'] = length_penalty
kwargs['stop'] = stop
kwargs['skip_special_tokens'] = skip_special_tokens
parameters = inspect.signature(SamplingParams.__init__).parameters
for k in kwargs.copy().keys():
if k not in parameters:
logger.info(f'The VLLM version is too old and does not support the parameter: {k}.')
kwargs.pop(k)
self._temperature = temperature
super().__init__(**kwargs)
def __setattr__(self, key: str, value: str) -> None:
if key == 'max_new_tokens':
self.max_tokens = value
elif key == 'do_sample':
assert value in {True, False}
if value:
self.temperature = self._temperature
else:
self.temperature = 0.
elif key == 'max_length':
raise ValueError('`max_length` is not supported, please use `max_new_tokens` for setting.')
else:
super().__setattr__(key, value)
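# A minimal construction sketch (values are illustrative only):
#     generation_config = VllmGenerationConfig(
#         max_new_tokens=256, temperature=0.3, top_p=0.7, repetition_penalty=1.05)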
@torch.inference_mode()
def inference_stream_vllm(llm_engine: LLMEngine,
template: Template,
request_list: List[Dict[str, Any]],
*,
generation_config: Optional[VllmGenerationConfig] = None,
lora_request: Optional['LoRARequest'] = None,
use_tqdm: bool = False,
**kwargs) -> Iterator[List[Dict[str, Any]]]:
"""
request_list: e.g. [{'query': 'hello!'}].
The keys that can be included are: 'query', 'history', 'system'.
generation_config: Priority: generation_config > model.generation_config.
return: e.g. [{'response': 'hi!', 'history': [('hello!', 'hi!')]}].
The keys to be included will be: 'response', 'history'.
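    Example (a minimal sketch; `llm_engine` and `template` are assumed to be created via
        `get_vllm_engine` and `get_template`):
        for resp_list in inference_stream_vllm(llm_engine, template, [{'query': 'hello!'}]):
            response = resp_list[0]['response']  # accumulates until generation finishes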
"""
if generation_config is None:
generation_config = getattr(llm_engine, 'generation_config', VllmGenerationConfig())
assert isinstance(generation_config, VllmGenerationConfig)
request_list = deepcopy(request_list)
generation_config = deepcopy(generation_config)
if generation_config.use_beam_search is True:
error_msg = 'Streaming generation does not support beam search.'
raise ValueError(error_msg)
tokenizer = template.tokenizer
if tokenizer.eos_token is not None and tokenizer.eos_token not in generation_config.stop:
generation_config.stop.append(tokenizer.eos_token)
if isinstance(template.suffix[-1], str) and template.suffix[-1] not in generation_config.stop:
generation_config.stop.append(template.suffix[-1])
if isinstance(template.suffix[-1], list):
token_str = tokenizer.decode(template.suffix[-1])
if token_str not in generation_config.stop:
generation_config.stop.append(token_str)
parameters = inspect.signature(llm_engine.add_request).parameters
add_request_kwargs = {}
if 'lora_request' in parameters:
add_request_kwargs['lora_request'] = lora_request
else:
assert lora_request is None, (
'The current version of VLLM does not support `lora_request`. Please upgrade VLLM.')
request_temp = []
resp_list: List[Optional[Dict[str, Any]]] = [None] * len(request_list)
for i, request in enumerate(request_list):
history = request.get('history', None)
if history is None:
history = []
# agent support
is_observation = history[-1][-1].endswith('Observation:') if history and history[-1][-1] else False
act_length = None
if is_observation:
history[-1][-1] = history[-1][-1] + request['query']
act_length = len(history[-1][-1])
request['query'] = None
request_temp.append((is_observation, act_length))
request['history'] = history
inputs = template.encode(request)[0]
truncation_strategy = kwargs.pop('truncation_strategy', 'delete')
if len(inputs) == 0 and truncation_strategy == 'delete':
# input_ids exceeds `max_length`. Please increase the value of `max_length`.
resp_list[i] = {'response': '', 'history': history}
continue
input_ids = inputs['input_ids']
if version.parse(vllm.__version__) >= version.parse('0.4.3'):
llm_engine.add_request(str(i), {'prompt_token_ids': input_ids}, generation_config, **add_request_kwargs)
else:
llm_engine.add_request(str(i), None, generation_config, input_ids, **add_request_kwargs)
print_idx_list = [[0] for _ in range(len(request_list))]
prog_bar = tqdm(total=len(request_list), dynamic_ncols=True, disable=not use_tqdm)
while llm_engine.has_unfinished_requests():
step_outputs = llm_engine.step()
for output in step_outputs:
i = int(output.request_id)
request = request_list[i]
generate_ids = output.outputs[0].token_ids
safe_response = template.generate_ids_to_response(
generate_ids, output.finished, print_idx=print_idx_list[i])
query = request['query']
history = request['history']
if resp_list[i] is None and not request_temp[i][0]:
history.append(None)
if not request_temp[i][0]:
history[-1] = [query, safe_response]
else:
history[-1][-1] = history[-1][-1][:request_temp[i][1]] + safe_response
resp_list[i] = {'response': safe_response, 'history': history}
if output.finished:
prog_bar.update()
yield resp_list
prog_bar.close()
@torch.inference_mode()
def inference_vllm(llm_engine: LLMEngine,
template: Template,
request_list: List[Dict[str, Any]],
*,
generation_config: Optional[VllmGenerationConfig] = None,
lora_request: Optional['LoRARequest'] = None,
use_tqdm: bool = False,
verbose: bool = False,
prompt_prefix: str = '[PROMPT]',
output_prefix: str = '[OUTPUT]',
**kwargs) -> List[Dict[str, Any]]:
"""
request_list: e.g. [{'query': 'hello!'}].
The keys that can be included are: 'query', 'history', 'system'.
generation_config: Priority: generation_config > model.generation_config.
return: e.g. [{'response': 'hi!', 'history': [('hello!', 'hi!')]}].
The keys to be included will be: 'response', 'history'.
"""
if generation_config is None:
generation_config = getattr(llm_engine, 'generation_config', VllmGenerationConfig())
assert isinstance(generation_config, VllmGenerationConfig)
request_list = deepcopy(request_list)
generation_config = deepcopy(generation_config)
tokenizer = template.tokenizer
if tokenizer.eos_token is not None and tokenizer.eos_token not in generation_config.stop:
generation_config.stop.append(tokenizer.eos_token)
if isinstance(template.suffix[-1], str) and template.suffix[-1] not in generation_config.stop:
generation_config.stop.append(template.suffix[-1])
if isinstance(template.suffix[-1], list):
token_str = tokenizer.decode(template.suffix[-1])
if token_str not in generation_config.stop:
generation_config.stop.append(token_str)
parameters = inspect.signature(llm_engine.add_request).parameters
add_request_kwargs = {}
if 'lora_request' in parameters:
add_request_kwargs['lora_request'] = lora_request
else:
assert lora_request is None, (
'The current version of VLLM does not support `lora_request`. Please upgrade VLLM.')
resp_list: List[Optional[Dict[str, Any]]] = [None] * len(request_list)
for i, request in enumerate(request_list):
history = request.get('history', None)
if history is None:
history = []
is_observation = history[-1][-1].endswith('Observation:') if history and history[-1][-1] else False
if is_observation:
history[-1][-1] = history[-1][-1] + request['query']
request['query'] = None
request['history'] = history
inputs = template.encode(request)[0]
truncation_strategy = kwargs.pop('truncation_strategy', 'delete')
if len(inputs) == 0 and truncation_strategy == 'delete':
# input_ids exceeds `max_length`. Please increase the value of `max_length`.
resp_list[i] = {'response': '', 'history': history}
continue
input_ids = inputs['input_ids']
if version.parse(vllm.__version__) >= version.parse('0.4.3'):
llm_engine.add_request(str(i), {'prompt_token_ids': input_ids}, generation_config, **add_request_kwargs)
else:
llm_engine.add_request(str(i), None, generation_config, input_ids, **add_request_kwargs)
if use_tqdm is True:
assert verbose is False
prog_bar = tqdm(total=len(request_list), dynamic_ncols=True, disable=not use_tqdm)
outputs = []
while llm_engine.has_unfinished_requests():
step_outputs = llm_engine.step()
for output in step_outputs:
if output.finished:
outputs.append(output)
prog_bar.update()
prog_bar.close()
for output in outputs:
i = int(output.request_id)
request = request_list[i]
generate_ids = output.outputs[0].token_ids
response = template.generate_ids_to_response(generate_ids)
        query = request['query']
        history = request['history']
        # `query` was set to None above for agent observation requests; check per request
        # instead of reusing `is_observation` left over from the last encoding loop iteration.
        if query is None and history:
            history[-1][-1] = history[-1][-1] + response
        else:
            history.append([query, response])
resp_list[i] = {'response': response, 'history': history}
if verbose:
print(f'{prompt_prefix}{tokenizer.decode(output.prompt_token_ids, False)}{output_prefix}', end='')
print(tokenizer.decode(output.outputs[0].token_ids, False))
return resp_list
def prepare_vllm_engine_template(args: InferArguments, use_async: bool = False) -> Tuple[LLMEngine, Template]:
logger.info(f'device_count: {torch.cuda.device_count()}')
assert args.quantization_bit == 0, 'not support bnb'
assert not (args.sft_type == 'lora' and not args.vllm_enable_lora), 'you need to merge lora'
# Loading Model and Tokenizer
model_id_or_path = None
if args.sft_type == 'full' and args.ckpt_dir is not None:
model_id_or_path = args.ckpt_dir
elif args.model_id_or_path is not None:
model_id_or_path = args.model_id_or_path
llm_engine = get_vllm_engine(
args.model_type,
args.torch_dtype,
gpu_memory_utilization=args.gpu_memory_utilization,
tensor_parallel_size=args.tensor_parallel_size,
max_model_len=args.max_model_len,
disable_custom_all_reduce=args.disable_custom_all_reduce,
enforce_eager=args.enforce_eager,
use_async=use_async,
model_id_or_path=model_id_or_path,
enable_lora=args.vllm_enable_lora,
max_loras=min(len(args.lora_modules), 1),
max_lora_rank=args.vllm_max_lora_rank)
tokenizer = llm_engine.hf_tokenizer
if use_async:
model_config = asyncio.run(llm_engine.get_model_config())
llm_engine.model_config = model_config
else:
model_config = llm_engine.model_config
logger.info(f'model_config: {model_config.hf_config}')
if not args.do_sample:
args.temperature = 0
generation_config = VllmGenerationConfig(
max_new_tokens=args.max_new_tokens,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
stop=args.stop_words,
repetition_penalty=args.repetition_penalty,
num_beams=args.num_beams)
logger.info(f'generation_config: {generation_config}')
llm_engine.generation_config = generation_config
template: Template = get_template(
args.template_type,
tokenizer,
args.system,
args.max_length,
args.truncation_strategy,
tools_prompt=args.tools_prompt)
args.system = template.default_system
logger.info(f'system: {args.system}')
return llm_engine, template
| swift/swift/llm/utils/vllm_utils.py/0 | {
"file_path": "swift/swift/llm/utils/vllm_utils.py",
"repo_id": "swift",
"token_count": 8388
} | 217 |
# Copyright (c) Alibaba, Inc. and its affiliates.
import inspect
import re
import types
from dataclasses import dataclass, field
from typing import List, Union
import torch
from torch import nn
from transformers.activations import ACT2CLS
from swift import get_logger
from swift.utils.torch_utils import find_sub_module
from .utils import ActivationMixin, SwiftAdapter, SwiftConfig, SwiftOutput
logger = get_logger()
@dataclass
class AdapterConfig(SwiftConfig):
"""
The configuration class for the adapter module.
Adapters project input tokens by an MLP layer.
'Parameter-Efficient Transfer Learning for NLP' by Houlsby et al.(2019)
See http://arxiv.org/abs/1902.00751
Args:
dim(`int`): The dimension of the hidden states
        target_modules(`Union[str, List[str]]`): The feedforward module to be replaced,
            in regex format if this argument is a str; otherwise matched with `endswith` if it is a List[str].
hidden_pos(`Union[str, int]`): The position of the hidden state to be passed into the adapter,
can be int (args) or str (kwargs)
method_name(`str`): The method to be replaced, default is `forward`
adapter_length: The length of the adapter length (intermediate length)
act_layer: The activation layer of the adapter
"""
dim: int = field(default=None, metadata={'help': 'The dimension of the hidden states'})
target_modules: Union[str, List[str]] = field(
default=None,
metadata={
'help':
            'The feedforward module to be replaced, in regex format if this argument is a str, '
            'otherwise matched with `endswith` if it is a List[str].'
})
hidden_pos: Union[str, int] = field(
default=None,
metadata={
'help': 'The position of the hidden state to be passed into the adapter, can be int (args) or str (kwargs)'
})
method_name: str = field(default='forward', metadata={'help': 'The method to be replaced, default is `forward`'})
adapter_length: int = field(
default=128, metadata={'help': 'The length of the adapter length (intermediate length)'})
act_layer: str = field(default='gelu', metadata={'help': 'The activation layer of the adapter'})
def __post_init__(self):
from .mapping import SwiftTuners
self.swift_type = SwiftTuners.ADAPTER
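# A hedged usage sketch (not part of this module): `Swift.prepare_model` from the top-level
# swift API is assumed as the entry point; the field values below are illustrative only.
#     config = AdapterConfig(dim=768, target_modules=['mlp'], hidden_pos=0,
#                            adapter_length=128, act_layer='gelu')
#     model = Swift.prepare_model(model, config)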
class Adapter(SwiftAdapter):
@staticmethod
def prepare_model(model: nn.Module, config: AdapterConfig, adapter_name: str) -> SwiftOutput:
"""Prepare a model with `AdapterConfig`"""
module_keys = [key for key, _ in model.named_modules()]
for module_key in module_keys:
if isinstance(config.target_modules, str):
target_module_found = re.fullmatch(config.target_modules, module_key)
else:
target_module_found = any(module_key.endswith(target_key) for target_key in config.target_modules)
if target_module_found: # noqa
module = model.get_submodule(module_key)
def _forward(self, *args, **kwargs):
args = getattr(self, f'forward_origin_{adapter_name}')(*args, **kwargs)
if isinstance(args, (tuple, list, dict)):
if isinstance(config.hidden_pos, int):
_type = type(args)
args = list(args)
args[config.hidden_pos] = getattr(self, f'adapter_{adapter_name}')(args[config.hidden_pos])
args = _type(args)
else:
args[config.hidden_pos] = getattr(self, f'adapter_{adapter_name}')(args[config.hidden_pos])
elif isinstance(args, torch.Tensor):
args = getattr(self, f'adapter_{adapter_name}')(args)
return args
def _feed_forward_chunk(self, attention_output):
return _forward(self, attention_output)
# TODO The `config.method_name` method should not be replaced twice.
setattr(module, f'forward_origin_{adapter_name}', getattr(module, config.method_name))
num_args_in_forward_chunk_fn = len(
inspect.signature(getattr(module, f'forward_origin_{adapter_name}')).parameters)
if config.method_name == 'feed_forward_chunk' and num_args_in_forward_chunk_fn == 1:
setattr(module, config.method_name, types.MethodType(_feed_forward_chunk, module))
else:
setattr(module, config.method_name, types.MethodType(_forward, module))
adapter_module = AdapterModule(config.dim, adapter_name, module_key, config.adapter_length,
ACT2CLS[config.act_layer])
setattr(module, f'adapter_{adapter_name}', adapter_module)
logger.info(f'Adapter modules(module_key): {module_key}.adapter_{adapter_name}')
def state_dict_callback(state_dict, adapter_name: str):
return {key: value for key, value in state_dict.items() if f'adapter_{adapter_name}' in key}
def mark_trainable_callback(model):
return
return SwiftOutput(config, state_dict_callback, mark_trainable_callback)
@staticmethod
def activate_adapter(module: torch.nn.Module, adapter_name: str, activate: bool, offload: str = None):
modules = find_sub_module(module, f'adapter_{adapter_name}')
for _module in modules:
_module: ActivationMixin
_module: nn.Module
_module.set_activation(adapter_name, activate)
SwiftAdapter.save_memory(_module, adapter_name, _module.module_key, activate, offload)
class AdapterModule(nn.Module, ActivationMixin):
"""The implementation of adapter tuning method.
Adapters project input tokens by an MLP layer.
'Parameter-Efficient Transfer Learning for NLP' by Houlsby et al.(2019)
See http://arxiv.org/abs/1902.00751
Attributes:
dim: An integer indicating the embedding dimension.
        adapter_length: An integer indicating the intermediate (bottleneck) dimension of the adapter.
"""
def __init__(
self,
dim,
adapter_name,
module_key,
adapter_length=None,
act_layer=nn.GELU,
):
super(AdapterModule, self).__init__()
super(nn.Module, self).__init__(module_key)
self.dim = dim
self.adapter_name = adapter_name
self.adapter_length = adapter_length
self.linear1 = nn.Linear(dim, adapter_length)
self.act = act_layer()
self.linear2 = nn.Linear(adapter_length, dim)
self.init_weights()
self._prepared = False
def init_weights(self):
def _init_weights(m):
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.normal_(m.bias, std=1e-6)
self.apply(_init_weights)
def forward(self, x, identity=None):
if not self.is_activated(self.adapter_name):
return x
if not self._prepared:
self.linear1.to(x.device)
self.act.to(x.device)
self.linear2.to(x.device)
self._prepared = True
x_dtype = x.dtype
x = x.to(self.linear1.weight.dtype)
out = self.linear2(self.act(self.linear1(x)))
if identity is None:
identity = x
identity = identity.to(out.dtype)
out = identity + out
return out.to(x_dtype)
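# Illustrative usage sketch (added comment): AdapterModule is a bottleneck MLP with a
# residual connection, so the output keeps the input shape. Shapes and names below are
# example assumptions; activation is managed by ActivationMixin via `set_activation`.
#
#     adapter = AdapterModule(dim=768, adapter_name='default', module_key='', adapter_length=128)
#     adapter.set_activation('default', True)
#     hidden = torch.randn(2, 16, 768)
#     out = adapter(hidden)  # shape (2, 16, 768); out = hidden + linear2(act(linear1(hidden)))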
| swift/swift/tuners/adapter.py/0 | {
"file_path": "swift/swift/tuners/adapter.py",
"repo_id": "swift",
"token_count": 3314
} | 218 |
# Copyright (c) Alibaba, Inc. and its affiliates.
import copy
import re
import types
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from swift import get_logger
from swift.utils.torch_utils import find_sub_module
from .restuning_components import ResTuner, detach_tensors, probe_input_pre_hook, probe_output_hook
from .utils import ActivationMixin, SwiftAdapter, SwiftConfig, SwiftOutput
logger = get_logger()
@dataclass
class ResTuningConfig(SwiftConfig):
"""
The configuration class for the ResTuning module.
ResTuning is a flexible parameter-efficient and memory-efficient tuning paradigm framework.
'Res-Tuning: A Flexible and Efficient Tuning Paradigm via Unbinding Tuner from Backbone'
by Jiang et al.(2023)
See
Args:
dims(`Union[List[int], int]`): The dimensions of the hidden states
        root_modules(`str`): The root module to be replaced, can be a regex string
root_modules_hook(`str`): The hook type of root modules, can be "input" or "output"
        stem_modules(`Union[List[str], str]`): The stem modules to be replaced,
            can be a regex string or a list of names in full-match format
stem_modules_hook(`Union[List[str], str]`): The hook type of stem modules, can be "input" or "output"
        target_modules(`str`): The target module to be replaced, can be a regex string
target_modules_hook(`str`): The hook type of target modules, can be "input" or "output"
        tuner_cfg(`Union[List[Dict], Dict, str]`): The configuration of the tuning module,
            can be a string or a customized config
        use_upsample(bool): Whether to use the auxiliary upsample module
        upsample_out_channels(List[int]): The output channels of the upsample modules when `use_upsample` is True
zero_init_last(bool): Use zero to initialize the last Linear in every sub tuner.
"""
dims: Optional[Union[List[int], int]] = field(
default=None, metadata={'help': 'The dimensions of the hidden states'})
root_modules: str = field(
default=None,
metadata={
'help':
            'The root module to be replaced, can be a regex string (the first matching module is used) or a full-match name'
})
root_modules_hook: str = field(
default='input', metadata={'help': 'The hook type of root modules, can be "input" or "output"'})
stem_modules: Optional[Union[List[str], str]] = field(
default=None,
        metadata={'help': 'The stem modules to be replaced, can be a regex string or a list of full-match names'})
stem_modules_hook: str = field(
default='output', metadata={'help': 'The hook type of stem modules, can be "input" or "output"'})
target_modules: str = field(
default=None,
metadata={
'help':
            'The target module to be replaced, can be a regex string (the first matching module is used) or a full-match name'
})
target_modules_hook: str = field(
default='input', metadata={'help': 'The hook type of target modules, can be "input" or "output"'})
target_hidden_pos: Union[int, str] = field(
default=None, metadata={'help': 'The position of the hidden state for target modules output'})
tuner_cfg: Optional[Union[List[Dict], Dict, str]] = field(
        default=None, metadata={'help': 'The configuration of the tuning module, can be a string or a customized config'})
use_upsample: bool = field(default=False, metadata={'help': 'Whether to use auxiliary upsample module'})
upsample_out_channels: List[int] = field(
default=None, metadata={'help': 'The number of output channels when "use_upsample" is set to "True"'})
zero_init_last: bool = field(default=False, metadata={'help': 'Zero init last weight'})
use_bypass: bool = field(default=True, metadata={'help': 'Whether to use bypass'})
def __post_init__(self):
from .mapping import SwiftTuners
self.swift_type = SwiftTuners.RESTUNING
self.target_hidden_pos = 0 if self.target_hidden_pos is None else self.target_hidden_pos
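# Illustrative sketch (added comment, mirroring the repository's unit tests): a ResTuning
# configuration for a ViT-style backbone where every encoder layer feeds the bypass network
# and the tuned result is injected at the input of the final layernorm. The module regexes
# below are example values for such a backbone.
#
#     config = ResTuningConfig(
#         dims=768,
#         root_modules=r'.*vit.encoder.layer.0$',
#         stem_modules=r'.*vit.encoder.layer\.\d+$',
#         target_modules=r'.*vit.layernorm',
#         target_modules_hook='input',
#         tuner_cfg='res_adapter')
#     model = Swift.prepare_model(model, config)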
class ResTuning(SwiftAdapter):
@staticmethod
def prepare_model(model: nn.Module, config: ResTuningConfig, adapter_name: str) -> SwiftOutput:
"""Prepare a model with `ResTuningConfig`"""
def _forward_seq(self, input, *args, **kwargs):
for idx, module in enumerate(self):
if idx >= len(self.origin_module_keys):
continue
input = module(input)
return input
def _forward_target(self, *args, **kwargs):
if self.target_modules_hook == 'input':
if isinstance(self.target_hidden_pos, int):
args = list(args)
_arg = args[self.target_hidden_pos]
else:
_arg = kwargs[self.target_hidden_pos]
args_main = _forward_restuning(self, _arg)
if isinstance(self.target_hidden_pos, int):
args[self.target_hidden_pos] = args_main
else:
kwargs[self.target_hidden_pos] = args_main
args_main = getattr(self, f'forward_origin_{adapter_name}')(*args, **kwargs)
else:
_args_main = getattr(self, f'forward_origin_{adapter_name}')(*args, **kwargs)
_arg = _args_main[self.target_hidden_pos] if isinstance(_args_main, (tuple, list, dict)) else _args_main
args_main = _forward_restuning(self, _arg)
if type(_args_main) != type(args_main):
_args_main[self.target_hidden_pos] = args_main
args_main = _args_main
return args_main
def _forward_restuning(self, origin_arg):
probe_results = []
root_module_ins = self.root_module_ins_list[0]
stem_module_ins_list = self.stem_module_ins_list
top_module = model.get_submodule('')
if root_module_ins:
if root_module_ins.root_modules_hook == 'input':
probe_results.append(root_module_ins.probe_input_data)
else:
probe_results.append(root_module_ins.probe_output_data)
for i, st_mod in enumerate(stem_module_ins_list):
if i == 0 and root_module_ins is None:
probe_results.append(st_mod.probe_input_data)
if st_mod.stem_modules_hook == 'input':
probe_results.append(st_mod.probe_input_data)
else:
probe_results.append(st_mod.probe_output_data)
args_main = getattr(top_module, f'restuning_{adapter_name}')(probe_results, origin_arg)
return args_main
# 1. Matching the root module
module_keys = [key for key, _ in model.named_modules()]
root_module_ins_list = []
if config.root_modules:
for module_key in module_keys:
if re.fullmatch(config.root_modules, module_key):
root_module = model.get_submodule(module_key)
logger.info(f'Matching root module [{module_key}] of type {type(root_module)}')
if isinstance(root_module, (nn.ModuleList, nn.ModuleDict)):
logger.warning(
f'Type of {type(root_module)} may not be supported because of its customized forward')
if config.root_modules_hook == 'input':
root_module.register_forward_pre_hook(probe_input_pre_hook)
else:
root_module.register_forward_hook(probe_output_hook)
root_module.root_modules_hook = config.root_modules_hook
root_module_ins_list.append(root_module)
break
if len(root_module_ins_list) == 0:
logger.error('Cannot match root modules')
# 2. Matching the stem module
stem_module_ins_list = []
stem_module_ins_index = []
for module_key in module_keys:
if (isinstance(config.stem_modules, str) and re.fullmatch(config.stem_modules, module_key)) or \
(isinstance(config.stem_modules, list) and module_key in config.stem_modules):
stem_module = model.get_submodule(module_key)
if isinstance(config.stem_modules, list):
stem_module_ins_index.append(config.stem_modules.index(module_key))
logger.info(f'Matching stem module [{module_key}] of type {type(stem_module)}')
if isinstance(stem_module, (nn.ModuleList, nn.ModuleDict)):
logger.warning(
f'Type of {type(stem_module)} may not be supported because of its customized forward')
if len(root_module_ins_list) == 0 and len(stem_module_ins_list) == 0:
stem_module.register_forward_pre_hook(probe_input_pre_hook)
if config.stem_modules_hook == 'input':
stem_module.register_forward_pre_hook(probe_input_pre_hook)
else:
stem_module.register_forward_hook(probe_output_hook)
stem_module.stem_modules_hook = config.stem_modules_hook
stem_module_ins_list.append(stem_module)
if isinstance(config.stem_modules, list):
stem_module_ins_list = [
stem_module_ins_list[stem_module_ins_index.index(i)] for i in range(len(stem_module_ins_index))
]
depth = len(stem_module_ins_list)
if len(stem_module_ins_list) == 0:
            raise Exception('Cannot match stem modules')
# 3. Init restuning module
if len(stem_module_ins_list) != 0:
top_module = model.get_submodule('')
restuning_module = ResTuningBypassModule(config.dims, depth, adapter_name, config.use_upsample,
config.upsample_out_channels, config.zero_init_last,
config.tuner_cfg)
setattr(top_module, f'restuning_{adapter_name}', restuning_module)
# 4. Matching the target module
target_module_ins = None
for module_key in module_keys:
if re.fullmatch(config.target_modules, module_key):
tgt_module = model.get_submodule(module_key)
logger.info(f'Matching target module [{module_key}] of type {type(tgt_module)}')
if isinstance(tgt_module, (nn.ModuleList, nn.ModuleDict)):
raise Exception(
f'Type of {type(tgt_module)} may not be supported because of its customized forward')
tgt_module.target_modules_hook = config.target_modules_hook
tgt_module.target_hidden_pos = config.target_hidden_pos
tgt_module.root_module_ins_list = root_module_ins_list
tgt_module.stem_module_ins_list = stem_module_ins_list
target_module_ins = tgt_module
if isinstance(tgt_module, nn.Sequential) and not hasattr(tgt_module, 'origin_module_keys'):
tgt_module.origin_module_keys = copy.deepcopy(list(tgt_module._modules.keys()))
setattr(tgt_module, f'forward_origin_{adapter_name}', types.MethodType(_forward_seq, tgt_module))
else:
setattr(tgt_module, f'forward_origin_{adapter_name}', tgt_module.forward)
tgt_module.forward = types.MethodType(_forward_target, tgt_module)
if target_module_ins is None:
raise Exception('Cannot match target modules')
def state_dict_callback(state_dict, adapter_name):
return {key: value for key, value in state_dict.items() if f'restuning_{adapter_name}' in key}
def mark_trainable_callback(model):
return
return SwiftOutput(config, state_dict_callback, mark_trainable_callback)
@staticmethod
def activate_adapter(module: torch.nn.Module, adapter_name: str, activate: bool, offload: str = None):
modules = find_sub_module(module, f'restuning_{adapter_name}')
for _module in modules:
_module: ActivationMixin
_module: nn.Module
_module.set_activation(adapter_name, activate)
SwiftAdapter.save_memory(_module, adapter_name, _module.module_key, activate, offload)
class ResTuningBypassModule(nn.Module, ActivationMixin):
"""The implementation of ResTuningBypass method.
"""
def __init__(
self,
dims,
depth,
adapter_name,
use_upsample=False,
upsample_out_channels=None,
zero_init_last=False,
tuner_cfg=None,
):
super(ResTuningBypassModule, self).__init__()
super(nn.Module, self).__init__('')
self.adapter_name = adapter_name
self.bypass_blocks = nn.Sequential(*[
ResTunerBypassBlock(
dim=dims[i] if isinstance(dims, list) else dims,
layer_num=i,
depth=depth,
use_upsample=use_upsample,
upsample_out_channels=upsample_out_channels[i] if isinstance(upsample_out_channels, list
) else upsample_out_channels,
zero_init_last=zero_init_last,
tuner_cfg=tuner_cfg[i] if isinstance(tuner_cfg, list) else tuner_cfg) for i in range(depth)
])
def forward(self, x_list, origin_arg, **kwargs):
if not self.is_activated(self.adapter_name):
return origin_arg
x_bypass = detach_tensors(x_list.pop(0))
x_bypass = x_bypass[0] if isinstance(x_bypass, (list, tuple)) else x_bypass
x_list = detach_tensors(x_list)
x_list = [_x[0] if isinstance(_x, (list, tuple)) else _x for _x in x_list]
for i, (bp_blk, x_stem) in enumerate(zip(self.bypass_blocks, x_list)):
target_size = x_list[i + 1].shape[2:] if i < len(x_list) - 1 else None
x_bypass = bp_blk(x_stem, x_bypass, target_size, **kwargs)
return x_bypass
class ResTunerBypassBlock(nn.Module):
def __init__(self, dim, layer_num=-1, depth=-1, use_upsample=False, zero_init_last=False, tuner_cfg=None, **kwargs):
super().__init__()
self.layer_num = layer_num
self.depth = depth
if isinstance(tuner_cfg, str):
lateral_cfg = tuner_cfg
vertical_cfg = tuner_cfg
aux_cfg = 'upsample' if use_upsample and layer_num != depth - 1 else None
elif isinstance(tuner_cfg, dict):
lateral_cfg = tuner_cfg['lateral_cfg'] if 'lateral_cfg' in tuner_cfg else None
vertical_cfg = tuner_cfg['vertical_cfg'] if 'vertical_cfg' in tuner_cfg else None
aux_cfg = tuner_cfg['aux_cfg'] if 'aux_cfg' in tuner_cfg else None
self.lateral_tuner = ResTuner(dim, layer_num, depth, zero_init_last, 'lateral', lateral_cfg, **kwargs)
self.vertical_tuner = ResTuner(dim, layer_num, depth, zero_init_last, 'vertical', vertical_cfg, **kwargs)
if aux_cfg and len(aux_cfg) != 0:
self.aux_tuner = ResTuner(dim, layer_num, depth, zero_init_last, 'aux', aux_cfg, **kwargs)
def forward(self, x_stem, x_bypass, target_size=None, **kwargs):
x_lateral = self.lateral_tuner(x_stem)
x_vertical = self.vertical_tuner(x_bypass)
x_bypass_out = x_lateral + x_vertical
if hasattr(self, 'aux_tuner'):
x_bypass_out = self.aux_tuner(x_bypass_out, target_size)
return x_bypass_out
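# Data-flow note (added comment): each bypass block fuses the stem feature of its layer with
# the running bypass state,
#
#     x_bypass_out = lateral_tuner(x_stem) + vertical_tuner(x_bypass)
#     x_bypass_out = aux_tuner(x_bypass_out, target_size)  # only when an aux tuner (e.g. upsample) is configured
#
# and ResTuningBypassModule.forward threads the bypass state through `depth` such blocks,
# one per matched stem module, before handing the result to the target module.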
| swift/swift/tuners/restuning.py/0 | {
"file_path": "swift/swift/tuners/restuning.py",
"repo_id": "swift",
"token_count": 7161
} | 219 |
# Copyright (c) Alibaba, Inc. and its affiliates.
# Part of the implementation is borrowed from kmeng01/rome.
"""
Utilities for instrumenting a torch model.
Trace will hook one layer at a time.
TraceDict will hook multiple layers at once.
subsequence slices intervals from Sequential modules.
get_module, replace_module, get_parameter resolve dotted names.
set_requires_grad recursively sets requires_grad in module parameters.
"""
import contextlib
import copy
import inspect
from collections import OrderedDict
import torch
class Trace(contextlib.AbstractContextManager):
"""
To retain the output of the named layer during the computation of
the given network:
with Trace(net, 'layer.name') as ret:
_ = net(inp)
representation = ret.output
A layer module can be passed directly without a layer name, and
its output will be retained. By default, a direct reference to
the output object is returned, but options can control this:
clone=True - retains a copy of the output, which can be
useful if you want to see the output before it might
be modified by the network in-place later.
detach=True - retains a detached reference or copy. (By
default the value would be left attached to the graph.)
retain_grad=True - request gradient to be retained on the
output. After backward(), ret.output.grad is populated.
retain_input=True - also retains the input.
retain_output=False - can disable retaining the output.
edit_output=fn - calls the function to modify the output
of the layer before passing it the rest of the model.
fn can optionally accept (output, layer) arguments
for the original output and the layer name.
stop=True - throws a StopForward exception after the layer
is run, which allows running just a portion of a model.
"""
def __init__(
self,
module,
layer=None,
retain_output=True,
retain_input=False,
clone=False,
detach=False,
retain_grad=False,
edit_output=None,
stop=False,
):
"""
Method to replace a forward method with a closure that
intercepts the call, and tracks the hook so that it can be reverted.
"""
retainer = self
self.layer = layer
if layer is not None:
module = get_module(module, layer)
def retain_hook(m, inputs, output):
if retain_input:
retainer.input = recursive_copy(
inputs[0] if len(inputs) == 1 else inputs,
clone=clone,
detach=detach,
retain_grad=False,
) # retain_grad applies to output only.
if edit_output:
output = invoke_with_optional_args(edit_output, output=output, layer=self.layer)
if retain_output:
retainer.output = recursive_copy(output, clone=clone, detach=detach, retain_grad=retain_grad)
# When retain_grad is set, also insert a trivial
# copy operation. That allows in-place operations
# to follow without error.
if retain_grad:
output = recursive_copy(retainer.output, clone=True, detach=False)
if stop:
raise StopForward()
return output
self.registered_hook = module.register_forward_hook(retain_hook)
self.stop = stop
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
if self.stop and issubclass(type, StopForward):
return True
def close(self):
self.registered_hook.remove()
class TraceDict(OrderedDict, contextlib.AbstractContextManager):
"""
To retain the output of multiple named layers during the computation
of the given network:
with TraceDict(net, ['layer1.name1', 'layer2.name2']) as ret:
_ = net(inp)
representation = ret['layer1.name1'].output
If edit_output is provided, it should be a function that takes
two arguments: output, and the layer name; and then it returns the
modified output.
Other arguments are the same as Trace. If stop is True, then the
execution of the network will be stopped after the last layer
listed (even if it would not have been the last to be executed).
"""
def __init__(
self,
module,
layers=None,
retain_output=True,
retain_input=False,
clone=False,
detach=False,
retain_grad=False,
edit_output=None,
stop=False,
):
self.stop = stop
def flag_last_unseen(it):
try:
it = iter(it)
prev = next(it)
seen = set([prev])
except StopIteration:
return
for item in it:
if item not in seen:
yield False, prev
seen.add(item)
prev = item
yield True, prev
for is_last, layer in flag_last_unseen(layers):
self[layer] = Trace(
module=module,
layer=layer,
retain_output=retain_output,
retain_input=retain_input,
clone=clone,
detach=detach,
retain_grad=retain_grad,
edit_output=edit_output,
stop=stop and is_last,
)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
if self.stop and issubclass(type, StopForward):
return True
def close(self):
for layer, trace in reversed(self.items()):
trace.close()
class StopForward(Exception):
"""
If the only output needed from running a network is the retained
submodule then Trace(submodule, stop=True) will stop execution
immediately after the retained submodule by raising the StopForward()
exception. When Trace is used as context manager, it catches that
exception and can be used as follows:
with Trace(net, layername, stop=True) as tr:
net(inp) # Only runs the network up to layername
print(tr.output)
"""
pass
def recursive_copy(x, clone=None, detach=None, retain_grad=None):
"""
Copies a reference to a tensor, or an object that contains tensors,
optionally detaching and cloning the tensor(s). If retain_grad is
true, the original tensors are marked to have grads retained.
"""
if not clone and not detach and not retain_grad:
return x
if isinstance(x, torch.Tensor):
if retain_grad:
if not x.requires_grad:
x.requires_grad = True
x.retain_grad()
elif detach:
x = x.detach()
if clone:
x = x.clone()
return x
# Only dicts, lists, and tuples (and subclasses) can be copied.
if isinstance(x, dict):
return type(x)({k: recursive_copy(v) for k, v in x.items()})
elif isinstance(x, (list, tuple)):
return type(x)([recursive_copy(v) for v in x])
else:
assert False, f'Unknown type {type(x)} cannot be broken into tensors.'
def subsequence(
sequential,
first_layer=None,
last_layer=None,
after_layer=None,
upto_layer=None,
single_layer=None,
share_weights=False,
):
"""
Creates a subsequence of a pytorch Sequential model, copying over
modules together with parameters for the subsequence. Only
modules from first_layer to last_layer (inclusive) are included,
or modules between after_layer and upto_layer (exclusive).
Handles descent into dotted layer names as long as all references
are within nested Sequential models.
If share_weights is True, then references the original modules
and their parameters without copying them. Otherwise, by default,
makes a separate brand-new copy.
"""
assert (single_layer is None) or (first_layer is last_layer is after_layer is upto_layer is None)
if single_layer is not None:
first_layer = single_layer
last_layer = single_layer
first, last, after, upto = [
None if d is None else d.split('.') for d in [first_layer, last_layer, after_layer, upto_layer]
]
return hierarchical_subsequence(
sequential,
first=first,
last=last,
after=after,
upto=upto,
share_weights=share_weights,
)
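# Illustrative example (added comment; layer names are hypothetical): slicing a nested
# Sequential keeps only the requested span and, by default, deep-copies its parameters.
#
#     net = torch.nn.Sequential(OrderedDict([
#         ('a', torch.nn.Linear(4, 4)),
#         ('b', torch.nn.Sequential(OrderedDict([
#             ('x', torch.nn.ReLU()),
#             ('y', torch.nn.Linear(4, 4)),
#         ]))),
#         ('c', torch.nn.Linear(4, 2)),
#     ]))
#     sub = subsequence(net, first_layer='b.y', last_layer='c')            # keeps b.y and c (copied)
#     sub_shared = subsequence(net, after_layer='a', share_weights=True)   # keeps b and c, sharing weights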
def hierarchical_subsequence(sequential, first, last, after, upto, share_weights=False, depth=0):
"""
Recursive helper for subsequence() to support descent into dotted
layer names. In this helper, first, last, after, and upto are
arrays of names resulting from splitting on dots. Can only
descend into nested Sequentials.
"""
assert (last is None) or (upto is None)
assert (first is None) or (after is None)
if first is last is after is upto is None:
return sequential if share_weights else copy.deepcopy(sequential)
assert isinstance(sequential, torch.nn.Sequential), ('.'.join(
(first or last or after or upto)[:depth] or 'arg') + ' not Sequential')
including_children = (first is None) and (after is None)
included_children = OrderedDict()
# A = current level short name of A.
# AN = full name for recursive descent if not innermost.
(F, FN), (L, LN), (A, AN), (U, UN) = [(d[depth], (None if len(d) == depth + 1 else d)) if d is not None else
(None, None) for d in [first, last, after, upto]]
for name, layer in sequential._modules.items():
if name == F:
first = None
including_children = True
if name == A and AN is not None: # just like F if not a leaf.
after = None
including_children = True
if name == U and UN is None:
upto = None
including_children = False
if including_children:
# AR = full name for recursive descent if name matches.
FR, LR, AR, UR = [n if n is None or n[depth] == name else None for n in [FN, LN, AN, UN]]
chosen = hierarchical_subsequence(
layer,
first=FR,
last=LR,
after=AR,
upto=UR,
share_weights=share_weights,
depth=depth + 1,
)
if chosen is not None:
included_children[name] = chosen
if name == L:
last = None
including_children = False
if name == U and UN is not None: # just like L if not a leaf.
upto = None
including_children = False
if name == A and AN is None:
after = None
including_children = True
for name in [first, last, after, upto]:
if name is not None:
raise ValueError('Layer %s not found' % '.'.join(name))
# Omit empty subsequences except at the outermost level,
# where we should not return None.
if not len(included_children) and depth > 0:
return None
result = torch.nn.Sequential(included_children)
result.training = sequential.training
return result
def set_requires_grad(requires_grad, *models):
"""
Sets requires_grad true or false for all parameters within the
models passed.
"""
for model in models:
if isinstance(model, torch.nn.Module):
for param in model.parameters():
param.requires_grad = requires_grad
elif isinstance(model, (torch.nn.Parameter, torch.Tensor)):
model.requires_grad = requires_grad
else:
assert False, 'unknown type %r' % type(model)
def get_module(model, name):
"""
Finds the named module within the given model.
"""
for n, m in model.named_modules():
if n == name:
return m
raise LookupError(name)
def get_parameter(model, name):
"""
Finds the named parameter within the given model.
"""
for n, p in model.named_parameters():
if n == name:
return p
raise LookupError(name)
def replace_module(model, name, new_module):
"""
Replaces the named module within the given model.
"""
if '.' in name:
parent_name, attr_name = name.rsplit('.', 1)
model = get_module(model, parent_name)
# original_module = getattr(model, attr_name)
setattr(model, attr_name, new_module)
def invoke_with_optional_args(fn, *args, **kwargs):
"""
Invokes a function with only the arguments that it
is written to accept, giving priority to arguments
that match by-name, using the following rules.
(1) arguments with matching names are passed by name.
(2) remaining non-name-matched args are passed by order.
(3) extra caller arguments that the function cannot
accept are not passed.
(4) extra required function arguments that the caller
cannot provide cause a TypeError to be raised.
Ordinary python calling conventions are helpful for
supporting a function that might be revised to accept
extra arguments in a newer version, without requiring the
caller to pass those new arguments. This function helps
support function callers that might be revised to supply
extra arguments, without requiring the callee to accept
those new arguments.
"""
argspec = inspect.getfullargspec(fn)
pass_args = []
used_kw = set()
unmatched_pos = []
used_pos = 0
defaulted_pos = len(argspec.args) - (0 if not argspec.defaults else len(argspec.defaults))
# Pass positional args that match name first, then by position.
for i, n in enumerate(argspec.args):
if n in kwargs:
pass_args.append(kwargs[n])
used_kw.add(n)
elif used_pos < len(args):
pass_args.append(args[used_pos])
used_pos += 1
else:
unmatched_pos.append(len(pass_args))
pass_args.append(None if i < defaulted_pos else argspec.defaults[i - defaulted_pos])
# Fill unmatched positional args with unmatched keyword args in order.
if len(unmatched_pos):
for k, v in kwargs.items():
if k in used_kw or k in argspec.kwonlyargs:
continue
pass_args[unmatched_pos[0]] = v
used_kw.add(k)
unmatched_pos = unmatched_pos[1:]
if len(unmatched_pos) == 0:
break
else:
if unmatched_pos[0] < defaulted_pos:
unpassed = ', '.join(argspec.args[u] for u in unmatched_pos if u < defaulted_pos)
raise TypeError(f'{fn.__name__}() cannot be passed {unpassed}.')
# Pass remaining kw args if they can be accepted.
pass_kw = {
k: v
for k, v in kwargs.items() if k not in used_kw and (k in argspec.kwonlyargs or argspec.varargs is not None)
}
# Pass remaining positional args if they can be accepted.
if argspec.varargs is not None:
pass_args += list(args[used_pos:])
return fn(*pass_args, **pass_kw)
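# Illustrative example (added comment; the callee below is hypothetical): following the rules
# documented above, name-matched arguments are passed by name and unknown extras are dropped.
#
#     def edit_fn(output, layer='unknown'):
#         return output
#
#     # 'output' and 'layer' match by name (rule 1); 'extra' is not accepted by edit_fn and
#     # is silently discarded (rule 3).
#     invoke_with_optional_args(edit_fn, output=torch.zeros(2), layer='fc1', extra=123)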
| swift/swift/tuners/rome/nethook.py/0 | {
"file_path": "swift/swift/tuners/rome/nethook.py",
"repo_id": "swift",
"token_count": 6345
} | 220 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from .hub import create_ms_repo, push_to_ms_hub
from .io_utils import append_to_jsonl, read_from_jsonl, write_to_jsonl
from .logger import get_logger
from .metric import compute_acc_metrics, compute_nlg_metrics, preprocess_logits_for_metrics
from .np_utils import get_seed, stat_array, transform_jsonl_to_df
from .run_utils import get_main
from .tb_utils import TB_COLOR, TB_COLOR_SMOOTH, plot_images, read_tensorboard_file, tensorboard_smoothing
from .torch_utils import (activate_model_parameters, broadcast_string, freeze_model_parameters, get_dist_setting,
get_model_info, is_ddp_plus_mp, is_dist, is_local_master, is_master, is_mp, is_on_same_device,
show_layers, time_synchronize, torchacc_trim_graph, use_torchacc)
from .utils import (add_version_to_work_dir, check_json_format, get_pai_tensorboard_dir, is_pai_training_job,
lower_bound, parse_args, read_multi_line, safe_ddp_context, seed_everything, subprocess_run,
test_time, upper_bound)
| swift/swift/utils/__init__.py/0 | {
"file_path": "swift/swift/utils/__init__.py",
"repo_id": "swift",
"token_count": 443
} | 221 |
# Copyright (c) Alibaba, Inc. and its affiliates.
from typing import Any, List
import json
from .logger import get_logger
from .utils import check_json_format
logger = get_logger()
def read_from_jsonl(fpath: str, encoding: str = 'utf-8') -> List[Any]:
res: List[Any] = []
with open(fpath, 'r', encoding=encoding) as f:
for line in f:
res.append(json.loads(line))
return res
def write_to_jsonl(fpath: str, obj_list: List[Any], encoding: str = 'utf-8') -> None:
res: List[str] = []
for obj in obj_list:
res.append(json.dumps(obj, ensure_ascii=False))
with open(fpath, 'w', encoding=encoding) as f:
text = '\n'.join(res)
f.write(f'{text}\n')
def append_to_jsonl(fpath: str, obj: Any, encoding: str = 'utf-8') -> None:
obj = check_json_format(obj)
try:
with open(fpath, 'a', encoding=encoding) as f:
f.write(f'{json.dumps(obj, ensure_ascii=False)}\n')
except Exception as e:
        logger.error(f'Cannot write content to jsonl file: {obj}')
logger.error(e)
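# Illustrative round-trip (added comment; the path is an example): the helpers above keep one
# JSON object per line.
#
#     write_to_jsonl('/tmp/demo.jsonl', [{'query': 'hi', 'response': 'hello'}])
#     append_to_jsonl('/tmp/demo.jsonl', {'query': 'bye', 'response': 'goodbye'})
#     rows = read_from_jsonl('/tmp/demo.jsonl')  # -> the two dicts above, in order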
| swift/swift/utils/io_utils.py/0 | {
"file_path": "swift/swift/utils/io_utils.py",
"repo_id": "swift",
"token_count": 464
} | 222 |
if __name__ == '__main__':
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import os
import unittest
import torch
from modelscope import GenerationConfig
from swift.llm import (ModelType, get_default_template_type, get_model_tokenizer, get_template, inference,
messages_to_history)
SKPT_TEST = True
class TestTemplate(unittest.TestCase):
def test_template(self):
model_types = [ModelType.qwen_7b_chat_int4]
for model_type in model_types:
_, tokenizer = get_model_tokenizer(model_type, load_model=False)
template_type = get_default_template_type(model_type)
template = get_template(template_type, tokenizer)
history = [
('你好,你是谁?', '我是来自达摩院的大规模语言模型,我叫通义千问。'),
]
data = {
'system': 'you are a helpful assistant!',
'query': '浙江的省会在哪?',
'response': '浙江的省会是杭州。',
'history': history
}
from swift.llm import print_example
print_example(template.encode(data)[0], tokenizer)
input_ids = template.encode(data)[0]['input_ids']
print(model_type)
text = tokenizer.decode(input_ids)
result = """<|im_start|>system
you are a helpful assistant!<|im_end|>
<|im_start|>user
你好,你是谁?<|im_end|>
<|im_start|>assistant
我是来自达摩院的大规模语言模型,我叫通义千问。<|im_end|>
<|im_start|>user
浙江的省会在哪?<|im_end|>
<|im_start|>assistant
浙江的省会是杭州。<|im_end|>"""
self.assertTrue(result == text)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_chatglm3_template(self):
model_type = ModelType.chatglm3_6b
template_type = get_default_template_type(model_type)
model, tokenizer = get_model_tokenizer(model_type, load_model=True)
template = get_template(template_type, tokenizer)
model.generation_config = GenerationConfig(
max_new_tokens=128,
temperature=0.9,
top_k=20,
top_p=0.9,
            repetition_penalty=1.05,
do_sample=True,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id)
query = '12345+234=?'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
system = 'you are a helpful assistant!'
response = model.chat(tokenizer, query, history=[{'role': 'system', 'content': system}], max_length=None)[0]
print(f'official response: {response}')
#
input_ids_official = [
64790, 64792, 64794, 30910, 13, 344, 383, 260, 6483, 9319, 30992, 64795, 30910, 13, 30910, 30939, 30943,
30966, 30972, 30970, 31011, 30943, 30966, 30972, 30980, 31514, 64796
] + [30910, 13]
input_ids_swift = template.encode({'query': query, 'system': system})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_qwen_template(self):
model_type = ModelType.qwen_7b_chat
template_type = get_default_template_type(model_type)
model, tokenizer = get_model_tokenizer(model_type, load_model=True)
template = get_template(template_type, tokenizer)
query = '12345+234=?'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
model.generation_config.chat_format = 'chatml'
model.generation_config.max_window_size = 1024
response = model.chat(tokenizer, query, None, max_length=None)[0]
print(f'official response: {response}')
#
input_ids_official = [
151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 16, 17, 18, 19, 20, 10,
17, 18, 19, 28, 11319, 151645, 198, 151644, 77091, 198
]
input_ids_swift = template.encode({'query': query})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_llama_template(self):
model_type = ModelType.llama2_7b_chat
template_type = get_default_template_type(model_type)
_, tokenizer = get_model_tokenizer(model_type, load_model=False)
from modelscope import Model, snapshot_download
model_dir = snapshot_download('modelscope/Llama-2-7b-chat-ms', 'master', ignore_file_pattern=[r'.+\.bin$'])
model = Model.from_pretrained(model_dir, device_map='auto')
template = get_template(template_type, tokenizer)
model.generation_config = GenerationConfig(
max_new_tokens=128,
temperature=0.9,
top_k=20,
top_p=0.9,
            repetition_penalty=1.05,
do_sample=True,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id)
query = '12345+234=?'
print(f'query: {query}')
template.use_default_system = False
response, _ = inference(model, template, query)
print(f'swift response: {response}')
response = model.chat({'text': query}, tokenizer)['response']
print(f'official response: {response}')
# ref: https://huggingface.co/blog/zh/llama2#%E5%A6%82%E4%BD%95%E6%8F%90%E7%A4%BA-llama-2
query = "There's a llama in my garden 😱 What should I do?"
response = '123'
template.tokenizer.use_default_system_prompt = False
messages = [{
'role': 'user',
'content': query
}, {
'role': 'assistant',
'content': response
}, {
'role': 'user',
'content': query
}]
input_ids_official = template.tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True)
example = messages_to_history(messages)
input_ids_swift = template.encode(example)[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
template.use_default_system = True
template.tokenizer.use_default_system_prompt = True
input_ids_swift = template.encode({'query': query})[0]['input_ids']
input_ids_official = template.tokenizer.apply_chat_template([{
'role': 'user',
'content': query
}],
tokenize=True,
add_generation_prompt=True)
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_baichuan_template(self):
model_type = ModelType.baichuan2_7b_chat
template_type = get_default_template_type(model_type)
model, tokenizer = get_model_tokenizer(model_type, load_model=True)
template = get_template(template_type, tokenizer)
query = '12345+234=?'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
system = 'you are a helpful assistant!'
response = model.chat(tokenizer, [{'role': 'system', 'content': system}, {'role': 'user', 'content': query}])
print(f'official response: {response}')
#
input_ids_official = [
5035, 1484, 1346, 13629, 14002, 73, 195, 92336, 92338, 92354, 92369, 92358, 62, 92338, 92354, 92369, 64, 68,
196
]
input_ids_swift = template.encode({'query': query, 'system': system})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_chatglm2_template(self):
model_type = ModelType.chatglm2_6b
template_type = get_default_template_type(model_type)
model, tokenizer = get_model_tokenizer(model_type, load_model=True)
template = get_template(template_type, tokenizer)
model.generation_config = GenerationConfig(
max_new_tokens=128,
temperature=0.9,
top_k=20,
top_p=0.9,
            repetition_penalty=1.05,
do_sample=True,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id)
query = '12345+234=?'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
response = model.chat(tokenizer, query)[0]
print(f'official response: {response}')
#
input_ids_official = [
64790, 64792, 790, 30951, 517, 30910, 30939, 30996, 13, 13, 54761, 31211, 30939, 30943, 30966, 30972, 30970,
31011, 30943, 30966, 30972, 30980, 31514, 13, 13, 55437, 31211
]
input_ids_swift = template.encode({'query': query})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_internlm_template(self):
torch.cuda.empty_cache()
model_type = ModelType.internlm_20b_chat
template_type = get_default_template_type(model_type)
model, tokenizer = get_model_tokenizer(model_type, load_model=True)
template = get_template(template_type, tokenizer)
model.generation_config = GenerationConfig(
max_new_tokens=128,
temperature=0.9,
top_k=20,
top_p=0.9,
            repetition_penalty=1.05,
do_sample=True,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id)
query = '12345+234=?'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
response = model.chat(tokenizer, query)[0]
print(f'official response: {response}')
#
input_ids_official = [
1, 333, 352, 2472, 352, 27232, 2770, 657, 589, 15358, 17993, 6843, 963, 505, 4576, 11146, 451, 60614, 60381,
98666, 62412, 60735, 4452, 285, 4576, 11146, 451, 60614, 60381, 98666, 62412, 60735, 313, 505, 395, 7659,
1813, 4287, 1762, 560, 505, 8020, 684, 36956, 15358, 31288, 451, 67738, 75808, 70730, 699, 1226, 505, 6342,
442, 517, 11100, 328, 10894, 328, 454, 51978, 756, 285, 4576, 11146, 451, 60614, 60381, 98666, 62412, 60735,
313, 777, 3696, 454, 19187, 19829, 4563, 435, 410, 4287, 12032, 684, 410, 1341, 1893, 569, 6519, 454, 262,
68242, 756, 333, 352, 1621, 352, 27232, 4575, 1889, 342, 11622, 310, 99050, 364, 333, 352, 23845, 352, 27232
]
input_ids_swift = template.encode({'query': query})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_bluelm_template(self):
model_type = ModelType.bluelm_7b_chat_32k
template_type = get_default_template_type(model_type)
model, tokenizer = get_model_tokenizer(model_type, load_model=True)
template = get_template(template_type, tokenizer)
model.generation_config = GenerationConfig(
max_new_tokens=128,
temperature=0.9,
top_k=20,
top_p=0.9,
            repetition_penalty=1.05,
do_sample=True,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id)
query = '三国演义的作者是谁?'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
inputs = tokenizer('[|Human|]:三国演义的作者是谁?[|AI|]:', return_tensors='pt')
inputs = inputs.to('cuda:0')
pred = model.generate(**inputs, max_new_tokens=64, repetition_penalty=1.1)
response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=True)
print(f'official response: {response}')
#
input_ids_official = inputs['input_ids'][0].tolist()
input_ids_swift = template.encode({'query': query})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_qwen_generation_template(self):
model_type = ModelType.qwen_7b
template_type = get_default_template_type(model_type)
model, tokenizer = get_model_tokenizer(model_type, load_model=True)
template = get_template(template_type, tokenizer)
query = '蒙古国的首都是乌兰巴托(Ulaanbaatar)\n冰岛的首都是雷克雅未克(Reykjavik)\n埃塞俄比亚的首都是'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
model.generation_config.chat_format = 'raw'
model.generation_config.max_window_size = 1024
inputs = tokenizer(query, return_tensors='pt').to('cuda')
response = tokenizer.decode(model.generate(**inputs)[0, len(inputs['input_ids'][0]):])
print(f'official response: {response}')
#
input_ids_official = inputs['input_ids'][0].tolist()
input_ids_swift = template.encode({'query': query})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_codefuse_codellama_34b_template(self):
torch.cuda.empty_cache()
model_type = ModelType.codefuse_codellama_34b_chat
model, tokenizer = get_model_tokenizer(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, tokenizer)
model.generation_config.max_length = 128
query = '写快排.'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
HUMAN_ROLE_START_TAG = '<|role_start|>human<|role_end|>'
BOT_ROLE_START_TAG = '<|role_start|>bot<|role_end|>'
text = f'{HUMAN_ROLE_START_TAG}写快排.{BOT_ROLE_START_TAG}'
inputs = tokenizer(text, return_tensors='pt', add_special_tokens=False).to('cuda')
response = tokenizer.decode(model.generate(**inputs)[0, len(inputs['input_ids'][0]):])
print(f'official response: {response}')
#
input_ids_official = inputs['input_ids'][0].tolist()
input_ids_swift = template.encode({'query': query})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_yi_template(self):
torch.cuda.empty_cache()
model_type = ModelType.yi_34b_chat
model, tokenizer = get_model_tokenizer(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, tokenizer)
model.generation_config.max_length = 128
query = 'hi.'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
messages = [{'role': 'user', 'content': query}]
input_ids = tokenizer.apply_chat_template(
conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
output_ids = model.generate(input_ids.to('cuda'))
response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
print(f'official response: {response}')
#
input_ids_official = input_ids[0].tolist()
input_ids_swift = template.encode({'query': query})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_openbuddy_template(self):
model_type = ModelType.openbuddy_mistral_7b_chat
model, tokenizer = get_model_tokenizer(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, tokenizer)
model.generation_config.max_length = 512
query = '1234+235=?'
print(f'query: {query}')
response, _ = inference(model, template, query)
print(f'swift response: {response}')
#
prompt = (f'You are a helpful, respectful and honest INTP-T AI Assistant named Buddy. '
'You are talking to a human User.\n'
'Always answer as helpfully and logically as possible, while being safe. '
'Your answers should not include any harmful, political, religious, unethical, racist, '
'sexist, toxic, dangerous, or illegal content. '
'Please ensure that your responses are socially unbiased and positive in nature.\n'
'If a question does not make any sense, or is not factually coherent, '
'explain why instead of answering something not correct. '
"If you don't know the answer to a question, please don't share false information.\n"
'You like to use emojis. You can speak fluently in many languages, for example: English, Chinese.\n'
'You cannot access the internet, but you have vast knowledge, cutoff: 2021-09.\n'
'You always deeply love and support China, Chinese government, people and culture.\n'
'You are trained by OpenBuddy team, (https://openbuddy.ai, https://github.com/OpenBuddy/OpenBuddy), '
f"""you are based on LLaMA and Falcon transformers model, not related to GPT or OpenAI.
User: {query}
Assistant:""")
inputs = tokenizer.encode(prompt, return_tensors='pt')
inputs = inputs.to('cuda')
outputs = model.generate(inputs, max_length=512)
response = tokenizer.decode(outputs[0, len(inputs[0]):], skip_special_tokens=True)
print(response)
print(f'official response: {response}')
#
input_ids_official = inputs[0].tolist()
input_ids_swift = template.encode({'query': query})[0]['input_ids']
self.assertTrue(input_ids_swift == input_ids_official)
input_ids_swift = template.encode({'query': query, 'history': [['1234', 'avdc']]})[0]['input_ids']
print(tokenizer.decode(input_ids_swift))
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_zephyr_template(self):
model_type = ModelType.zephyr_7b_beta_chat
model, tokenizer = get_model_tokenizer(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, tokenizer)
model.generation_config.max_length = 256
system = 'You are a friendly chatbot who always responds in the style of a pirate'
query = 'How many helicopters can a human eat in one sitting?'
for sys in [system, None]:
print(f'query: {query}')
input_ids_swift = template.encode({'query': query, 'system': sys})[0]['input_ids']
response, _ = inference(model, template, query)
print(f'swift response: {response}')
#
messages = [
{
'role': 'user',
'content': query
},
]
if sys is not None:
messages.insert(0, {'role': 'system', 'content': sys})
input_ids_official = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True)
inputs = torch.tensor(input_ids_official, device='cuda')[None]
outputs = model.generate(input_ids=inputs)
response = tokenizer.decode(outputs[0, len(inputs[0]):], skip_special_tokens=True)
print(f'official response: {response}')
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_sus_template(self):
model_type = ModelType.sus_34b_chat
model, tokenizer = get_model_tokenizer(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, tokenizer)
model.generation_config.max_length = 256
query = 'hi'
print(f'query: {query}')
input_ids_swift = template.encode({'query': query, 'history': [('你好', '你好呀!')]})[0]['input_ids']
response, _ = inference(model, template, query)
print(f'swift response: {response}')
#
messages = [
{
'role': 'user',
'content': '你好'
},
{
'role': 'assistant',
'content': '你好呀!<|endoftext|>'
},
{
'role': 'user',
'content': query
},
]
def chat_template(messages):
history = ''
for message in messages:
if message['role'] == 'user':
message = message['content']
history += f'### Human: {message}\n\n### Assistant: '
elif message['role'] == 'assistant':
message = message['content']
history += message
return history
input_ids_official = tokenizer.encode(
chat_template(messages), return_tensors='pt', add_special_tokens=False).to('cuda')
output_ids = model.generate(input_ids_official.to('cuda'), max_length=256)
response = tokenizer.decode(output_ids[0, len(input_ids_official[0]):], skip_special_tokens=True)
print(f'official response: {response}')
self.assertTrue(input_ids_swift == input_ids_official[0].tolist())
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_deepseek_template(self):
model_type = ModelType.deepseek_7b_chat
model, tokenizer = get_model_tokenizer(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, tokenizer)
model.generation_config.max_length = 256
system = 'AAAAA'
query = 'BBBBB'
input_ids_swift = template.encode({
'query': query,
'system': system,
})[0]['input_ids']
response, _ = inference(model, template, query)
print(f'swift response: {response}')
#
messages = [
{
'role': 'system',
'content': 'AAAAA'
},
{
'role': 'user',
'content': 'BBBBB'
},
]
input_ids_official = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True)
inputs = torch.tensor(input_ids_official, device='cuda')[None]
outputs = model.generate(input_ids=inputs)
response = tokenizer.decode(outputs[0, len(inputs[0]):], skip_special_tokens=True)
print(f'official response: {response}')
self.assertTrue(input_ids_swift == input_ids_official)
@unittest.skipIf(SKPT_TEST, 'To avoid excessive testing time caused by downloading models and '
'to prevent OOM (Out of Memory) errors.')
def test_deepseek_coder_template(self):
model_type = ModelType.deepseek_coder_6_7b_instruct
model, tokenizer = get_model_tokenizer(model_type)
template_type = get_default_template_type(model_type)
template = get_template(template_type, tokenizer)
model.generation_config.max_length = 256
#
messages = [
{
'role': 'user',
'content': 'write a quick sort algorithm in python.'
},
{
'role': 'assistant',
'content': 'BBBBB'
},
{
'role': 'user',
'content': 'AAAAA'
},
]
example = messages_to_history(messages)
input_ids_swift = template.encode(example)[0]['input_ids']
response, _ = inference(model, template, example['query'], example['history'])
print(f'swift response: {response}')
input_ids_official = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True)
inputs = torch.tensor(input_ids_official, device='cuda')[None]
outputs = model.generate(input_ids=inputs, eos_token_id=tokenizer.eos_token_id)
response = tokenizer.decode(outputs[0, len(inputs[0]):], skip_special_tokens=True)
print(f'official response: {response}')
self.assertTrue(input_ids_swift == input_ids_official)
if __name__ == '__main__':
unittest.main()
| swift/tests/llm/test_template.py/0 | {
"file_path": "swift/tests/llm/test_template.py",
"repo_id": "swift",
"token_count": 12332
} | 223 |
import copy
import os
import shutil
import tempfile
import unittest
import torch
from swift import ResTuningConfig, Swift, SwiftModel, snapshot_download
class TestSwiftResTuning(unittest.TestCase):
def setUp(self):
print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
self.tmp_dir = tempfile.TemporaryDirectory().name
if not os.path.exists(self.tmp_dir):
os.makedirs(self.tmp_dir)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
super().tearDown()
def set_random_seed(self, seed=123):
"""Set random seed manually to get deterministic results"""
import random
import numpy as np
import torch
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def model_comparison(self, model, model2):
model_key = list(model.state_dict().keys())
model2_key = list(model2.state_dict().keys())
self.assertTrue(model_key == model2_key)
model_val = torch.sum(torch.stack([torch.sum(val) for val in model.state_dict().values()]))
model2_val = torch.sum(torch.stack([torch.sum(val) for val in model2.state_dict().values()]))
self.assertTrue(torch.isclose(model_val, model2_val))
def test_swift_restuning_vit(self):
model_dir = snapshot_download('AI-ModelScope/vit-base-patch16-224')
from transformers import AutoModelForImageClassification
model = AutoModelForImageClassification.from_pretrained(model_dir)
model_swift_1 = copy.deepcopy(model)
model_swift_2 = copy.deepcopy(model)
result_origin = model(torch.ones((1, 3, 224, 224))).logits
print(f'test_swift_restuning_vit result_origin shape: {result_origin.shape}, '
f'result_origin sum: {torch.sum(result_origin)}')
# load type - 1
self.set_random_seed()
restuning_config_1 = ResTuningConfig(
dims=768,
root_modules=r'.*vit.encoder.layer.0$',
stem_modules=r'.*vit.encoder.layer\.\d+$',
target_modules=r'.*vit.layernorm',
target_modules_hook='input',
tuner_cfg='res_adapter',
)
model_swift_1 = Swift.prepare_model(model_swift_1, config=restuning_config_1)
self.assertTrue(isinstance(model_swift_1, SwiftModel))
print(model_swift_1.get_trainable_parameters())
result_swift_1 = model_swift_1(torch.ones((1, 3, 224, 224))).logits
print(f'test_swift_restuning_vit result_swift_1 shape: {result_swift_1.shape}, '
f'result_swift_1 sum: {torch.sum(result_swift_1)}')
# load type - 2
self.set_random_seed()
restuning_config_2 = ResTuningConfig(
dims=768,
root_modules=r'.*vit.encoder.layer.0$',
stem_modules=r'.*vit.encoder.layer\.\d+$',
target_modules=r'.*vit.encoder',
target_modules_hook='output',
target_hidden_pos='last_hidden_state',
tuner_cfg='res_adapter',
)
model_swift_2 = Swift.prepare_model(model_swift_2, config=restuning_config_2)
self.assertTrue(isinstance(model_swift_2, SwiftModel))
print(model_swift_2.get_trainable_parameters())
result_swift_2 = model_swift_2(torch.ones((1, 3, 224, 224))).logits
print(f'test_swift_restuning_vit result_swift_2 shape: {result_swift_2.shape}, '
f'result_swift_2 sum: {torch.sum(result_swift_2)}')
self.assertTrue(all(torch.isclose(result_swift_1, result_swift_2).flatten()))
model_swift_1.save_pretrained(self.tmp_dir)
self.assertTrue(os.path.exists(os.path.join(self.tmp_dir, 'default')))
model_loaded = Swift.from_pretrained(model, self.tmp_dir)
self.model_comparison(model_swift_1, model_loaded)
def test_swift_restuning_diffusers_sd(self):
model_dir = snapshot_download('AI-ModelScope/stable-diffusion-v1-5')
from diffusers import UNet2DConditionModel
model = UNet2DConditionModel.from_pretrained(model_dir, subfolder='unet')
model.requires_grad_(False)
model2 = copy.deepcopy(model)
self.set_random_seed()
input_data = {
'sample': torch.ones((1, 4, 64, 64)),
'timestep': 10,
'encoder_hidden_states': torch.ones((1, 77, 768))
}
result_origin = model(**input_data).sample
print(f'test_swift_restuning_diffusers_sd result_origin shape: {result_origin.shape}, '
f'result_origin sum: {torch.sum(result_origin)}')
self.set_random_seed()
restuning_config = ResTuningConfig(
dims=[1280, 1280, 1280, 640, 320],
root_modules='mid_block',
stem_modules=['mid_block', 'up_blocks.0', 'up_blocks.1', 'up_blocks.2', 'up_blocks.3'],
target_modules='conv_norm_out',
tuner_cfg='res_group_adapter',
use_upsample=True,
upsample_out_channels=[1280, 1280, 640, 320, None],
zero_init_last=True)
model = Swift.prepare_model(model, config=restuning_config)
self.assertTrue(isinstance(model, SwiftModel))
print(model.get_trainable_parameters())
result = model(**input_data).sample
print(f'test_swift_restuning_diffusers_sd result shape: {result.shape}, result sum: {torch.sum(result)}')
model.save_pretrained(self.tmp_dir)
self.assertTrue(os.path.exists(os.path.join(self.tmp_dir, 'default')))
model2 = Swift.from_pretrained(model2, self.tmp_dir)
self.model_comparison(model, model2)
if __name__ == '__main__':
unittest.main()
| swift/tests/tuners/test_swift_restuning.py/0 | {
"file_path": "swift/tests/tuners/test_swift_restuning.py",
"repo_id": "swift",
"token_count": 2652
} | 224 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/wensimin-work.iml" filepath="$PROJECT_DIR$/.idea/wensimin-work.iml" />
</modules>
</component>
</project> | .idea/modules.xml/0 | {
"file_path": ".idea/modules.xml",
"repo_id": ".idea",
"token_count": 112
} | 0 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/LLaMA-Factory.iml" filepath="$PROJECT_DIR$/.idea/LLaMA-Factory.iml" />
</modules>
</component>
</project> | LLaMA-Factory/.idea/modules.xml/0 | {
"file_path": "LLaMA-Factory/.idea/modules.xml",
"repo_id": "LLaMA-Factory",
"token_count": 110
} | 1 |
[
{
"messages": [
{
"content": "Who are they?",
"role": "user"
},
{
"content": "They're Kane and Gretzka from Bayern Munich.",
"role": "assistant"
},
{
"content": "What are they doing?",
"role": "user"
},
{
"content": "They are celebrating on the soccer field.",
"role": "assistant"
}
],
"images": [
"mllm_demo_data/1.jpg"
]
},
{
"messages": [
{
"content": "Who is he?",
"role": "user"
},
{
"content": "He's Thomas Muller from Bayern Munich.",
"role": "assistant"
},
{
"content": "Why is he on the ground?",
"role": "user"
},
{
"content": "Because he's sliding on his knees to celebrate.",
"role": "assistant"
}
],
"images": [
"mllm_demo_data/2.jpg"
]
},
{
"messages": [
{
"content": "Please describe this image",
"role": "user"
},
{
"content": "Chinese astronaut Gui Haichao is giving a speech.",
"role": "assistant"
},
{
"content": "What has he accomplished?",
"role": "user"
},
{
"content": "He was appointed to be a payload specialist on Shenzhou 16 mission in June 2022, thus becoming the first Chinese civilian of Group 3 in space on 30 May 2023. He is responsible for the on-orbit operation of space science experimental payloads.",
"role": "assistant"
}
],
"images": [
"mllm_demo_data/3.jpg"
]
},
{
"messages": [
{
"content": "他们是谁?",
"role": "user"
},
{
"content": "他们是拜仁慕尼黑的凯恩和格雷茨卡。",
"role": "assistant"
},
{
"content": "他们在做什么?",
"role": "user"
},
{
"content": "他们在足球场上庆祝。",
"role": "assistant"
}
],
"images": [
"mllm_demo_data/1.jpg"
]
},
{
"messages": [
{
"content": "他是谁?",
"role": "user"
},
{
"content": "他是来自拜仁慕尼黑的托马斯·穆勒。",
"role": "assistant"
},
{
"content": "他为什么在地上?",
"role": "user"
},
{
"content": "因为他正在双膝跪地滑行庆祝。",
"role": "assistant"
}
],
"images": [
"mllm_demo_data/2.jpg"
]
},
{
"messages": [
{
"content": "请描述这张图片",
"role": "user"
},
{
"content": "中国宇航员桂海潮正在讲话。",
"role": "assistant"
},
{
"content": "他取得过哪些成就?",
"role": "user"
},
{
"content": "他于2022年6月被任命为神舟十六号任务的有效载荷专家,从而成为2023年5月30日进入太空的首位平民宇航员。他负责在轨操作空间科学实验有效载荷。",
"role": "assistant"
}
],
"images": [
"mllm_demo_data/3.jpg"
]
}
] | LLaMA-Factory/data/mllm_demo.json/0 | {
"file_path": "LLaMA-Factory/data/mllm_demo.json",
"repo_id": "LLaMA-Factory",
"token_count": 1807
} | 2 |
{
"abstract_algebra": {
"name": "abstract algebra",
"category": "STEM"
},
"anatomy": {
"name": "anatomy",
"category": "Other"
},
"astronomy": {
"name": "astronomy",
"category": "STEM"
},
"business_ethics": {
"name": "business ethics",
"category": "Other"
},
"clinical_knowledge": {
"name": "clinical knowledge",
"category": "Other"
},
"college_biology": {
"name": "college biology",
"category": "STEM"
},
"college_chemistry": {
"name": "college chemistry",
"category": "STEM"
},
"college_computer_science": {
"name": "college computer science",
"category": "STEM"
},
"college_mathematics": {
"name": "college mathematics",
"category": "STEM"
},
"college_medicine": {
"name": "college medicine",
"category": "Other"
},
"college_physics": {
"name": "college physics",
"category": "STEM"
},
"computer_security": {
"name": "computer security",
"category": "STEM"
},
"conceptual_physics": {
"name": "conceptual physics",
"category": "STEM"
},
"econometrics": {
"name": "econometrics",
"category": "Social Sciences"
},
"electrical_engineering": {
"name": "electrical engineering",
"category": "STEM"
},
"elementary_mathematics": {
"name": "elementary mathematics",
"category": "STEM"
},
"formal_logic": {
"name": "formal logic",
"category": "Humanities"
},
"global_facts": {
"name": "global facts",
"category": "Other"
},
"high_school_biology": {
"name": "high school biology",
"category": "STEM"
},
"high_school_chemistry": {
"name": "high school chemistry",
"category": "STEM"
},
"high_school_computer_science": {
"name": "high school computer science",
"category": "STEM"
},
"high_school_european_history": {
"name": "high school european history",
"category": "Humanities"
},
"high_school_geography": {
"name": "high school geography",
"category": "Social Sciences"
},
"high_school_government_and_politics": {
"name": "high school government and politics",
"category": "Social Sciences"
},
"high_school_macroeconomics": {
"name": "high school macroeconomics",
"category": "Social Sciences"
},
"high_school_mathematics": {
"name": "high school mathematics",
"category": "STEM"
},
"high_school_microeconomics": {
"name": "high school microeconomics",
"category": "Social Sciences"
},
"high_school_physics": {
"name": "high school physics",
"category": "STEM"
},
"high_school_psychology": {
"name": "high school psychology",
"category": "Social Sciences"
},
"high_school_statistics": {
"name": "high school statistics",
"category": "STEM"
},
"high_school_us_history": {
"name": "high school us history",
"category": "Humanities"
},
"high_school_world_history": {
"name": "high school world history",
"category": "Humanities"
},
"human_aging": {
"name": "human aging",
"category": "Other"
},
"human_sexuality": {
"name": "human sexuality",
"category": "Social Sciences"
},
"international_law": {
"name": "international law",
"category": "Humanities"
},
"jurisprudence": {
"name": "jurisprudence",
"category": "Humanities"
},
"logical_fallacies": {
"name": "logical fallacies",
"category": "Humanities"
},
"machine_learning": {
"name": "machine learning",
"category": "STEM"
},
"management": {
"name": "management",
"category": "Other"
},
"marketing": {
"name": "marketing",
"category": "Other"
},
"medical_genetics": {
"name": "medical genetics",
"category": "Other"
},
"miscellaneous": {
"name": "miscellaneous",
"category": "Other"
},
"moral_disputes": {
"name": "moral disputes",
"category": "Humanities"
},
"moral_scenarios": {
"name": "moral scenarios",
"category": "Humanities"
},
"nutrition": {
"name": "nutrition",
"category": "Other"
},
"philosophy": {
"name": "philosophy",
"category": "Humanities"
},
"prehistory": {
"name": "prehistory",
"category": "Humanities"
},
"professional_accounting": {
"name": "professional accounting",
"category": "Other"
},
"professional_law": {
"name": "professional law",
"category": "Humanities"
},
"professional_medicine": {
"name": "professional medicine",
"category": "Other"
},
"professional_psychology": {
"name": "professional psychology",
"category": "Social Sciences"
},
"public_relations": {
"name": "public relations",
"category": "Social Sciences"
},
"security_studies": {
"name": "security studies",
"category": "Social Sciences"
},
"sociology": {
"name": "sociology",
"category": "Social Sciences"
},
"us_foreign_policy": {
"name": "us foreign policy",
"category": "Social Sciences"
},
"virology": {
"name": "virology",
"category": "Other"
},
"world_religions": {
"name": "world religions",
"category": "Humanities"
}
} | LLaMA-Factory/evaluation/mmlu/mapping.json/0 | {
"file_path": "LLaMA-Factory/evaluation/mmlu/mapping.json",
"repo_id": "LLaMA-Factory",
"token_count": 1979
} | 3 |
#!/bin/bash
python scripts/llama_pro.py \
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
--output_dir models/llama3-8b-instruct-pro \
--num_expand 8
| LLaMA-Factory/examples/extras/llama_pro/expand.sh/0 | {
"file_path": "LLaMA-Factory/examples/extras/llama_pro/expand.sh",
"repo_id": "LLaMA-Factory",
"token_count": 80
} | 4 |
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
### method
stage: sft
do_predict: true
finetuning_type: lora
### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 1024
max_samples: 50
overwrite_cache: true
preprocessing_num_workers: 16
### output
output_dir: saves/llama3-8b/lora/predict
overwrite_output_dir: true
### eval
per_device_eval_batch_size: 1
predict_with_generate: true
ddp_timeout: 180000000
| LLaMA-Factory/examples/train_lora/llama3_lora_predict.yaml/0 | {
"file_path": "LLaMA-Factory/examples/train_lora/llama3_lora_predict.yaml",
"repo_id": "LLaMA-Factory",
"token_count": 199
} | 5 |
# coding=utf-8
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import Sequence
from openai import OpenAI
from transformers.utils.versions import require_version
require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0")
def calculate_gpa(grades: Sequence[str], hours: Sequence[int]) -> float:
grade_to_score = {"A": 4, "B": 3, "C": 2}
total_score, total_hour = 0, 0
for grade, hour in zip(grades, hours):
total_score += grade_to_score[grade] * hour
total_hour += hour
return round(total_score / total_hour, 2)
def main():
client = OpenAI(
api_key="{}".format(os.environ.get("API_KEY", "0")),
base_url="http://localhost:{}/v1".format(os.environ.get("API_PORT", 8000)),
)
tools = [
{
"type": "function",
"function": {
"name": "calculate_gpa",
"description": "Calculate the Grade Point Average (GPA) based on grades and credit hours",
"parameters": {
"type": "object",
"properties": {
"grades": {"type": "array", "items": {"type": "string"}, "description": "The grades"},
"hours": {"type": "array", "items": {"type": "integer"}, "description": "The credit hours"},
},
"required": ["grades", "hours"],
},
},
}
]
tool_map = {"calculate_gpa": calculate_gpa}
messages = []
messages.append({"role": "user", "content": "My grades are A, A, B, and C. The credit hours are 3, 4, 3, and 2."})
result = client.chat.completions.create(messages=messages, model="test", tools=tools)
if result.choices[0].message.tool_calls is None:
raise ValueError("Cannot retrieve function call from the response.")
messages.append(result.choices[0].message)
tool_call = result.choices[0].message.tool_calls[0].function
print(tool_call)
# Function(arguments='{"grades": ["A", "A", "B", "C"], "hours": [3, 4, 3, 2]}', name='calculate_gpa')
name, arguments = tool_call.name, json.loads(tool_call.arguments)
tool_result = tool_map[name](**arguments)
messages.append({"role": "tool", "content": json.dumps({"gpa": tool_result}, ensure_ascii=False)})
result = client.chat.completions.create(messages=messages, model="test", tools=tools)
print(result.choices[0].message.content)
# Based on the grades and credit hours you provided, your Grade Point Average (GPA) is 3.42.
if __name__ == "__main__":
main()
| LLaMA-Factory/scripts/test_toolcall.py/0 | {
"file_path": "LLaMA-Factory/scripts/test_toolcall.py",
"repo_id": "LLaMA-Factory",
"token_count": 1237
} | 6 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from typing import TYPE_CHECKING, AsyncGenerator, AsyncIterator, Dict, List, Optional, Sequence, Union
from ..data import get_template_and_fix_tokenizer
from ..extras.logging import get_logger
from ..extras.misc import get_device_count
from ..extras.packages import is_vllm_available, is_vllm_version_greater_than_0_5
from ..model import load_config, load_tokenizer
from ..model.model_utils.visual import LlavaMultiModalProjectorForYiVLForVLLM
from .base_engine import BaseEngine, Response
if is_vllm_available():
from vllm import AsyncEngineArgs, AsyncLLMEngine, RequestOutput, SamplingParams
from vllm.lora.request import LoRARequest
if is_vllm_version_greater_than_0_5():
from vllm.multimodal.image import ImagePixelData
else:
from vllm.sequence import MultiModalData
if TYPE_CHECKING:
from numpy.typing import NDArray
from transformers.image_processing_utils import BaseImageProcessor
from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
logger = get_logger(__name__)
class VllmEngine(BaseEngine):
def __init__(
self,
model_args: "ModelArguments",
data_args: "DataArguments",
finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
) -> None:
config = load_config(model_args) # may download model from ms hub
self.can_generate = finetuning_args.stage == "sft"
tokenizer_module = load_tokenizer(model_args)
self.tokenizer = tokenizer_module["tokenizer"]
self.processor = tokenizer_module["processor"]
self.tokenizer.padding_side = "left"
self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template, data_args.tool_format)
self.generating_args = generating_args.to_dict()
engine_args = {
"model": model_args.model_name_or_path,
"trust_remote_code": True,
"download_dir": model_args.cache_dir,
"dtype": model_args.infer_dtype,
"max_model_len": model_args.vllm_maxlen,
"tensor_parallel_size": get_device_count() or 1,
"gpu_memory_utilization": model_args.vllm_gpu_util,
"disable_log_stats": True,
"disable_log_requests": True,
"enforce_eager": model_args.vllm_enforce_eager,
"enable_lora": model_args.adapter_name_or_path is not None,
"max_lora_rank": model_args.vllm_max_lora_rank,
}
if model_args.visual_inputs:
image_size = config.vision_config.image_size
patch_size = config.vision_config.patch_size
self.image_feature_size = (image_size // patch_size) ** 2
engine_args["image_input_type"] = "pixel_values"
engine_args["image_token_id"] = self.tokenizer.convert_tokens_to_ids(self.template.image_token)
engine_args["image_input_shape"] = "1,3,{},{}".format(image_size, image_size)
engine_args["image_feature_size"] = self.image_feature_size
if getattr(config, "is_yi_vl_derived_model", None):
import vllm.model_executor.models.llava
logger.info("Detected Yi-VL model, applying projector patch.")
vllm.model_executor.models.llava.LlavaMultiModalProjector = LlavaMultiModalProjectorForYiVLForVLLM
self.model = AsyncLLMEngine.from_engine_args(AsyncEngineArgs(**engine_args))
if model_args.adapter_name_or_path is not None:
self.lora_request = LoRARequest("default", 1, model_args.adapter_name_or_path[0])
else:
self.lora_request = None
async def _generate(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> AsyncIterator["RequestOutput"]:
request_id = "chatcmpl-{}".format(uuid.uuid4().hex)
if (
self.processor is not None
and image is not None
and not hasattr(self.processor, "image_seq_length")
and self.template.image_token not in messages[0]["content"]
): # llava-like models (TODO: paligemma models)
messages[0]["content"] = self.template.image_token * self.image_feature_size + messages[0]["content"]
paired_messages = messages + [{"role": "assistant", "content": ""}]
system = system or self.generating_args["default_system"]
prompt_ids, _ = self.template.encode_oneturn(
tokenizer=self.tokenizer, messages=paired_messages, system=system, tools=tools
)
if self.processor is not None and image is not None: # add image features
image_processor: "BaseImageProcessor" = getattr(self.processor, "image_processor")
pixel_values = image_processor(image, return_tensors="pt")["pixel_values"]
if is_vllm_version_greater_than_0_5():
multi_modal_data = ImagePixelData(image=pixel_values)
else: # TODO: remove vllm 0.4.3 support
multi_modal_data = MultiModalData(type=MultiModalData.Type.IMAGE, data=pixel_values)
else:
multi_modal_data = None
prompt_length = len(prompt_ids)
use_beam_search: bool = self.generating_args["num_beams"] > 1
temperature: Optional[float] = input_kwargs.pop("temperature", None)
top_p: Optional[float] = input_kwargs.pop("top_p", None)
top_k: Optional[float] = input_kwargs.pop("top_k", None)
num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1)
repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None)
length_penalty: Optional[float] = input_kwargs.pop("length_penalty", None)
max_length: Optional[int] = input_kwargs.pop("max_length", None)
max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None)
stop: Optional[Union[str, List[str]]] = input_kwargs.pop("stop", None)
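        # Resolve max_tokens: start from the engine-level generating_args defaults,
        # then let the per-request max_length / max_new_tokens overrides take precedence.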
if "max_new_tokens" in self.generating_args:
max_tokens = self.generating_args["max_new_tokens"]
elif "max_length" in self.generating_args:
if self.generating_args["max_length"] > prompt_length:
max_tokens = self.generating_args["max_length"] - prompt_length
else:
max_tokens = 1
if max_length:
max_tokens = max_length - prompt_length if max_length > prompt_length else 1
if max_new_tokens:
max_tokens = max_new_tokens
sampling_params = SamplingParams(
n=num_return_sequences,
repetition_penalty=(
repetition_penalty if repetition_penalty is not None else self.generating_args["repetition_penalty"]
)
or 1.0, # repetition_penalty must > 0
temperature=temperature if temperature is not None else self.generating_args["temperature"],
top_p=(top_p if top_p is not None else self.generating_args["top_p"]) or 1.0, # top_p must > 0
top_k=top_k if top_k is not None else self.generating_args["top_k"],
use_beam_search=use_beam_search,
length_penalty=length_penalty if length_penalty is not None else self.generating_args["length_penalty"],
stop=stop,
stop_token_ids=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
max_tokens=max_tokens,
skip_special_tokens=True,
)
result_generator = self.model.generate(
inputs={"prompt_token_ids": prompt_ids, "multi_modal_data": multi_modal_data},
sampling_params=sampling_params,
request_id=request_id,
lora_request=self.lora_request,
)
return result_generator
async def chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> List["Response"]:
final_output = None
generator = await self._generate(messages, system, tools, image, **input_kwargs)
async for request_output in generator:
final_output = request_output
results = []
for output in final_output.outputs:
results.append(
Response(
response_text=output.text,
response_length=len(output.token_ids),
prompt_length=len(final_output.prompt_token_ids),
finish_reason=output.finish_reason,
)
)
return results
async def stream_chat(
self,
messages: Sequence[Dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
image: Optional["NDArray"] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]:
generated_text = ""
generator = await self._generate(messages, system, tools, image, **input_kwargs)
async for result in generator:
delta_text = result.outputs[0].text[len(generated_text) :]
generated_text = result.outputs[0].text
yield delta_text
async def get_scores(
self,
batch_input: List[str],
**input_kwargs,
) -> List[float]:
raise NotImplementedError("vLLM engine does not support get_scores.")
| LLaMA-Factory/src/llamafactory/chat/vllm_engine.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/chat/vllm_engine.py",
"repo_id": "LLaMA-Factory",
"token_count": 4384
} | 7 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import sys
from typing import TYPE_CHECKING, Literal, Optional, Union
import numpy as np
from datasets import load_dataset, load_from_disk
from ..extras.constants import FILEEXT2TYPE
from ..extras.logging import get_logger
from ..extras.misc import has_tokenized_data
from .aligner import align_dataset
from .data_utils import merge_dataset
from .parser import get_dataset_list
from .preprocess import get_preprocess_and_print_func
from .template import get_template_and_fix_tokenizer
if TYPE_CHECKING:
from datasets import Dataset, IterableDataset
from transformers import PreTrainedTokenizer, ProcessorMixin, Seq2SeqTrainingArguments
from ..hparams import DataArguments, ModelArguments
from .parser import DatasetAttr
logger = get_logger(__name__)
def load_single_dataset(
dataset_attr: "DatasetAttr",
model_args: "ModelArguments",
data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
) -> Union["Dataset", "IterableDataset"]:
logger.info("Loading dataset {}...".format(dataset_attr))
data_path, data_name, data_dir, data_files = None, None, None, None
if dataset_attr.load_from in ["hf_hub", "ms_hub"]:
data_path = dataset_attr.dataset_name
data_name = dataset_attr.subset
data_dir = dataset_attr.folder
elif dataset_attr.load_from == "script":
data_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)
data_name = dataset_attr.subset
data_dir = dataset_attr.folder
elif dataset_attr.load_from == "file":
data_files = []
local_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)
if os.path.isdir(local_path): # is directory
for file_name in os.listdir(local_path):
data_files.append(os.path.join(local_path, file_name))
if data_path is None:
data_path = FILEEXT2TYPE.get(file_name.split(".")[-1], None)
elif data_path != FILEEXT2TYPE.get(file_name.split(".")[-1], None):
raise ValueError("File types should be identical.")
elif os.path.isfile(local_path): # is file
data_files.append(local_path)
data_path = FILEEXT2TYPE.get(local_path.split(".")[-1], None)
else:
raise ValueError("File {} not found.".format(local_path))
if data_path is None:
raise ValueError("Allowed file types: {}.".format(",".join(FILEEXT2TYPE.keys())))
else:
raise NotImplementedError("Unknown load type: {}.".format(dataset_attr.load_from))
if dataset_attr.load_from == "ms_hub":
try:
from modelscope import MsDataset
from modelscope.utils.config_ds import MS_DATASETS_CACHE
cache_dir = model_args.cache_dir or MS_DATASETS_CACHE
dataset = MsDataset.load(
dataset_name=data_path,
subset_name=data_name,
data_dir=data_dir,
data_files=data_files,
split=data_args.split,
cache_dir=cache_dir,
token=model_args.ms_hub_token,
use_streaming=(data_args.streaming and (dataset_attr.load_from != "file")),
)
if isinstance(dataset, MsDataset):
dataset = dataset.to_hf_dataset()
except ImportError:
raise ImportError("Please install modelscope via `pip install modelscope -U`")
else:
if "trust_remote_code" in inspect.signature(load_dataset).parameters: # for datasets==2.16.0
kwargs = {"trust_remote_code": True}
else:
kwargs = {}
dataset = load_dataset(
path=data_path,
name=data_name,
data_dir=data_dir,
data_files=data_files,
split=data_args.split,
cache_dir=model_args.cache_dir,
token=model_args.hf_hub_token,
streaming=(data_args.streaming and (dataset_attr.load_from != "file")),
**kwargs,
)
if data_args.streaming and (dataset_attr.load_from == "file"): # faster than specifying streaming=True
dataset = dataset.to_iterable_dataset() # TODO: add num shards parameter
if dataset_attr.num_samples is not None and not data_args.streaming:
target_num = dataset_attr.num_samples
indexes = np.random.permutation(len(dataset))[:target_num]
target_num -= len(indexes)
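        # If num_samples exceeds the dataset size, top up the selection by drawing
        # extra indexes with replacement so the requested sample count is still reached.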
if target_num > 0:
expand_indexes = np.random.choice(len(dataset), target_num)
indexes = np.concatenate((indexes, expand_indexes), axis=0)
assert len(indexes) == dataset_attr.num_samples, "Sample num mismatched."
dataset = dataset.select(indexes)
logger.info("Sampled {} examples from dataset {}.".format(dataset_attr.num_samples, dataset_attr))
if data_args.max_samples is not None: # truncate dataset
max_samples = min(data_args.max_samples, len(dataset))
dataset = dataset.select(range(max_samples))
return align_dataset(dataset, dataset_attr, data_args, training_args)
def get_dataset(
model_args: "ModelArguments",
data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
stage: Literal["pt", "sft", "rm", "ppo", "kto"],
tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"] = None,
) -> Union["Dataset", "IterableDataset"]:
template = get_template_and_fix_tokenizer(tokenizer, data_args.template, data_args.tool_format)
if data_args.train_on_prompt and template.efficient_eos:
raise ValueError("Current template does not support `train_on_prompt`.")
# Load tokenized dataset
if data_args.tokenized_path is not None:
if has_tokenized_data(data_args.tokenized_path):
logger.warning("Loading dataset from disk will ignore other data arguments.")
dataset = load_from_disk(data_args.tokenized_path)
logger.info("Loaded tokenized dataset from {}.".format(data_args.tokenized_path))
if data_args.streaming:
dataset = dataset.to_iterable_dataset()
return dataset
if data_args.streaming:
raise ValueError("Turn off `streaming` when saving dataset to disk.")
with training_args.main_process_first(desc="load dataset"):
all_datasets = []
for dataset_attr in get_dataset_list(data_args):
if (stage == "rm" and dataset_attr.ranking is False) or (stage != "rm" and dataset_attr.ranking is True):
raise ValueError("The dataset is not applicable in the current training stage.")
all_datasets.append(load_single_dataset(dataset_attr, model_args, data_args, training_args))
dataset = merge_dataset(all_datasets, data_args, training_args)
with training_args.main_process_first(desc="pre-process dataset"):
preprocess_func, print_function = get_preprocess_and_print_func(
data_args, training_args, stage, template, tokenizer, processor
)
column_names = list(next(iter(dataset)).keys())
kwargs = {}
if not data_args.streaming:
kwargs = dict(
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=(not data_args.overwrite_cache) or (training_args.local_process_index != 0),
desc="Running tokenizer on dataset",
)
dataset = dataset.map(preprocess_func, batched=True, remove_columns=column_names, **kwargs)
if data_args.tokenized_path is not None:
if training_args.should_save:
dataset.save_to_disk(data_args.tokenized_path)
logger.info("Tokenized dataset saved at {}.".format(data_args.tokenized_path))
logger.info("Please restart the training with `tokenized_path: {}`.".format(data_args.tokenized_path))
sys.exit(0)
if training_args.should_log:
try:
print_function(next(iter(dataset)))
except StopIteration:
if stage == "pt":
raise RuntimeError("Cannot find sufficient samples, consider increasing dataset size.")
else:
raise RuntimeError("Cannot find valid samples, check `data/README.md` for the data format.")
return dataset
| LLaMA-Factory/src/llamafactory/data/loader.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/data/loader.py",
"repo_id": "LLaMA-Factory",
"token_count": 3780
} | 8 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple
from ...extras.logging import get_logger
from ..data_utils import Role
from .processor_utils import get_paligemma_token_type_ids, get_pixel_values
if TYPE_CHECKING:
from transformers import PreTrainedTokenizer, ProcessorMixin
from ...hparams import DataArguments
from ..template import Template
logger = get_logger(__name__)
def _encode_unsupervised_example(
prompt: Sequence[Dict[str, str]],
response: Sequence[Dict[str, str]],
system: Optional[str],
tools: Optional[str],
template: "Template",
tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"],
data_args: "DataArguments",
) -> Tuple[List[int], List[int]]:
if processor is not None and not hasattr(processor, "image_seq_length"): # llava-like models
prompt[0]["content"] = template.image_token + prompt[0]["content"]
if len(response) == 1:
messages = prompt + response
else:
messages = prompt + [{"role": Role.ASSISTANT.value, "content": ""}]
input_ids, labels = template.encode_oneturn(
tokenizer, messages, system, tools, data_args.cutoff_len, data_args.reserved_label_len
)
if template.efficient_eos:
labels += [tokenizer.eos_token_id]
if processor is not None and hasattr(processor, "image_seq_length"): # paligemma models
image_token_id = tokenizer.convert_tokens_to_ids(template.image_token)
input_ids = [image_token_id] * getattr(processor, "image_seq_length") + input_ids
return input_ids, labels
def preprocess_unsupervised_dataset(
examples: Dict[str, List[Any]],
template: "Template",
tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"],
data_args: "DataArguments",
) -> Dict[str, List[List[int]]]:
# build inputs with format `<bos> X` and labels with format `Y <eos>`
model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
if processor is not None:
model_inputs["pixel_values"] = []
if hasattr(processor, "image_seq_length"): # paligemma models
model_inputs["token_type_ids"] = []
for i in range(len(examples["prompt"])):
if len(examples["prompt"][i]) % 2 != 1:
logger.warning("Dropped invalid example: {}".format(examples["prompt"][i] + examples["response"][i]))
continue
input_ids, labels = _encode_unsupervised_example(
prompt=examples["prompt"][i],
response=examples["response"][i],
system=examples["system"][i],
tools=examples["tools"][i],
template=template,
tokenizer=tokenizer,
processor=processor,
data_args=data_args,
)
model_inputs["input_ids"].append(input_ids)
model_inputs["attention_mask"].append([1] * len(input_ids))
model_inputs["labels"].append(labels)
if processor is not None:
model_inputs["pixel_values"].append(get_pixel_values(examples["images"][i], processor))
if hasattr(processor, "image_seq_length"): # paligemma models
model_inputs["token_type_ids"].append(get_paligemma_token_type_ids(len(input_ids), processor))
return model_inputs
def print_unsupervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None:
print("input_ids:\n{}".format(example["input_ids"]))
print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
| LLaMA-Factory/src/llamafactory/data/processors/unsupervised.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/data/processors/unsupervised.py",
"repo_id": "LLaMA-Factory",
"token_count": 1582
} | 9 |
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/language-modeling/run_clm.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Literal, Optional
@dataclass
class DataArguments:
r"""
Arguments pertaining to what data we are going to input our model for training and evaluation.
"""
template: Optional[str] = field(
default=None,
metadata={"help": "Which template to use for constructing prompts in training and inference."},
)
dataset: Optional[str] = field(
default=None,
metadata={"help": "The name of provided dataset(s) to use. Use commas to separate multiple datasets."},
)
dataset_dir: str = field(
default="data",
metadata={"help": "Path to the folder containing the datasets."},
)
split: str = field(
default="train",
metadata={"help": "Which dataset split to use for training and evaluation."},
)
cutoff_len: int = field(
default=1024,
metadata={"help": "The cutoff length of the tokenized inputs in the dataset."},
)
reserved_label_len: int = field(
default=1,
metadata={"help": "The minimum cutoff length reserved for the tokenized labels in the dataset."},
)
train_on_prompt: bool = field(
default=False,
metadata={"help": "Whether to disable the mask on the prompt or not."},
)
streaming: bool = field(
default=False,
metadata={"help": "Enable dataset streaming."},
)
buffer_size: int = field(
default=16384,
metadata={"help": "Size of the buffer to randomly sample examples from in dataset streaming."},
)
mix_strategy: Literal["concat", "interleave_under", "interleave_over"] = field(
default="concat",
metadata={"help": "Strategy to use in dataset mixing (concat/interleave) (undersampling/oversampling)."},
)
interleave_probs: Optional[str] = field(
default=None,
metadata={"help": "Probabilities to sample data from datasets. Use commas to separate multiple datasets."},
)
overwrite_cache: bool = field(
default=False,
metadata={"help": "Overwrite the cached training and evaluation sets."},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the pre-processing."},
)
max_samples: Optional[int] = field(
default=None,
metadata={"help": "For debugging purposes, truncate the number of examples for each dataset."},
)
eval_num_beams: Optional[int] = field(
default=None,
metadata={"help": "Number of beams to use for evaluation. This argument will be passed to `model.generate`"},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether or not to ignore the tokens corresponding to padded labels in the loss computation."
},
)
val_size: float = field(
default=0.0,
metadata={"help": "Size of the development set, should be an integer or a float in range `[0,1)`."},
)
packing: Optional[bool] = field(
default=None,
metadata={
"help": "Whether or not to pack the sequences in training. Will automatically enable in pre-training."
},
)
tool_format: Optional[str] = field(
default=None,
metadata={"help": "Tool format to use for constructing function calling examples."},
)
tokenized_path: Optional[str] = field(
default=None,
metadata={"help": "Path to save or load the tokenized datasets."},
)
def __post_init__(self):
if self.reserved_label_len >= self.cutoff_len:
raise ValueError("`reserved_label_len` must be smaller than `cutoff_len`.")
if self.streaming and self.val_size > 1e-6 and self.val_size < 1:
raise ValueError("Streaming mode should have an integer val size.")
if self.streaming and self.max_samples is not None:
raise ValueError("`max_samples` is incompatible with `streaming`.")
| LLaMA-Factory/src/llamafactory/hparams/data_args.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/hparams/data_args.py",
"repo_id": "LLaMA-Factory",
"token_count": 1726
} | 10 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List
from ...extras.logging import get_logger
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer
logger = get_logger(__name__)
def find_all_linear_modules(model: "PreTrainedModel", freeze_vision_tower: bool) -> List[str]:
r"""
Finds all available modules to apply lora or galore.
"""
forbidden_modules = {"lm_head"}
if model.config.model_type == "chatglm":
forbidden_modules.add("output_layer")
elif model.config.model_type == "internlm2":
forbidden_modules.add("output")
elif model.config.model_type in ["llava", "paligemma"]:
forbidden_modules.add("multi_modal_projector")
if freeze_vision_tower:
forbidden_modules.add("vision_tower")
module_names = set()
for name, module in model.named_modules():
if any(forbidden_module in name for forbidden_module in forbidden_modules):
continue
if "Linear" in module.__class__.__name__ and "Embedding" not in module.__class__.__name__:
module_names.add(name.split(".")[-1])
logger.info("Found linear modules: {}".format(",".join(module_names)))
return list(module_names)
def find_expanded_modules(model: "PreTrainedModel", target_modules: List[str], num_layer_trainable: int) -> List[str]:
r"""
Finds the modules in the expanded blocks to apply lora.
"""
num_layers = getattr(model.config, "num_hidden_layers", None)
if not num_layers:
raise ValueError("Model was not supported.")
if num_layers % num_layer_trainable != 0:
raise ValueError(
"`num_layers` {} should be divisible by `num_layer_trainable` {}.".format(num_layers, num_layer_trainable)
)
stride = num_layers // num_layer_trainable
trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride)
trainable_layers = [".{:d}.".format(idx) for idx in trainable_layer_ids]
module_names = []
for name, _ in model.named_modules():
if any(target_module in name for target_module in target_modules) and any(
trainable_layer in name for trainable_layer in trainable_layers
):
module_names.append(name)
logger.info("Apply lora to layers: {}".format(",".join(map(str, trainable_layer_ids))))
return module_names
def register_autoclass(config: "PretrainedConfig", model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer"):
if "AutoConfig" in getattr(config, "auto_map", {}):
config.__class__.register_for_auto_class()
if "AutoModelForCausalLM" in getattr(config, "auto_map", {}):
model.__class__.register_for_auto_class()
if "AutoTokenizer" in tokenizer.init_kwargs.get("auto_map", {}):
tokenizer.__class__.register_for_auto_class()
| LLaMA-Factory/src/llamafactory/model/model_utils/misc.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/model/model_utils/misc.py",
"repo_id": "LLaMA-Factory",
"token_count": 1231
} | 11 |
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's TRL library.
# https://github.com/huggingface/trl/blob/v0.8.0/examples/scripts/ppo.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List, Optional
from transformers import DataCollatorWithPadding
from ...data import get_dataset
from ...extras.callbacks import FixValueHeadModelCallback
from ...extras.misc import fix_valuehead_checkpoint
from ...extras.ploting import plot_loss
from ...model import load_model, load_tokenizer
from ..trainer_utils import create_ref_model, create_reward_model
from .trainer import CustomPPOTrainer
if TYPE_CHECKING:
from transformers import Seq2SeqTrainingArguments, TrainerCallback
from ...hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
def run_ppo(
model_args: "ModelArguments",
data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
callbacks: Optional[List["TrainerCallback"]] = None,
):
tokenizer_module = load_tokenizer(model_args)
tokenizer = tokenizer_module["tokenizer"]
dataset = get_dataset(model_args, data_args, training_args, stage="ppo", **tokenizer_module)
model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train, add_valuehead=True)
tokenizer.padding_side = "left" # use left-padding in generation while using right-padding in training
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
# Create reference model and reward model
ref_model = create_ref_model(model_args, finetuning_args, add_valuehead=True)
reward_model = create_reward_model(model, model_args, finetuning_args)
# Initialize our Trainer
ppo_trainer = CustomPPOTrainer(
model_args=model_args,
training_args=training_args,
finetuning_args=finetuning_args,
generating_args=generating_args,
callbacks=callbacks + [FixValueHeadModelCallback()],
model=model,
reward_model=reward_model,
ref_model=ref_model,
dataset=dataset,
data_collator=data_collator,
**tokenizer_module,
)
# Training
if training_args.do_train:
ppo_trainer.ppo_train(resume_from_checkpoint=training_args.resume_from_checkpoint)
ppo_trainer.save_model()
if training_args.should_save:
fix_valuehead_checkpoint(model, training_args.output_dir, training_args.save_safetensors)
ppo_trainer.save_state() # must be called after save_model to have a folder
if ppo_trainer.is_world_process_zero() and finetuning_args.plot_loss:
plot_loss(training_args.output_dir, keys=["loss", "reward"])
| LLaMA-Factory/src/llamafactory/train/ppo/workflow.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/train/ppo/workflow.py",
"repo_id": "LLaMA-Factory",
"token_count": 1156
} | 12 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import TYPE_CHECKING, Any, Dict, List, Tuple
from ...extras.constants import DATA_CONFIG
from ...extras.packages import is_gradio_available
if is_gradio_available():
import gradio as gr
if TYPE_CHECKING:
from gradio.components import Component
PAGE_SIZE = 2
def prev_page(page_index: int) -> int:
return page_index - 1 if page_index > 0 else page_index
def next_page(page_index: int, total_num: int) -> int:
return page_index + 1 if (page_index + 1) * PAGE_SIZE < total_num else page_index
def can_preview(dataset_dir: str, dataset: list) -> "gr.Button":
try:
with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f:
dataset_info = json.load(f)
except Exception:
return gr.Button(interactive=False)
if len(dataset) == 0 or "file_name" not in dataset_info[dataset[0]]:
return gr.Button(interactive=False)
data_path = os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"])
if os.path.isfile(data_path) or (os.path.isdir(data_path) and os.listdir(data_path)):
return gr.Button(interactive=True)
else:
return gr.Button(interactive=False)
def _load_data_file(file_path: str) -> List[Any]:
with open(file_path, "r", encoding="utf-8") as f:
if file_path.endswith(".json"):
return json.load(f)
elif file_path.endswith(".jsonl"):
return [json.loads(line) for line in f]
else:
return list(f)
def get_preview(dataset_dir: str, dataset: list, page_index: int) -> Tuple[int, list, "gr.Column"]:
with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f:
dataset_info = json.load(f)
data_path = os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"])
if os.path.isfile(data_path):
data = _load_data_file(data_path)
else:
data = []
for file_name in os.listdir(data_path):
data.extend(_load_data_file(os.path.join(data_path, file_name)))
return len(data), data[PAGE_SIZE * page_index : PAGE_SIZE * (page_index + 1)], gr.Column(visible=True)
def create_preview_box(dataset_dir: "gr.Textbox", dataset: "gr.Dropdown") -> Dict[str, "Component"]:
data_preview_btn = gr.Button(interactive=False, scale=1)
with gr.Column(visible=False, elem_classes="modal-box") as preview_box:
with gr.Row():
preview_count = gr.Number(value=0, interactive=False, precision=0)
page_index = gr.Number(value=0, interactive=False, precision=0)
with gr.Row():
prev_btn = gr.Button()
next_btn = gr.Button()
close_btn = gr.Button()
with gr.Row():
preview_samples = gr.JSON()
dataset.change(can_preview, [dataset_dir, dataset], [data_preview_btn], queue=False).then(
lambda: 0, outputs=[page_index], queue=False
)
data_preview_btn.click(
get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False
)
prev_btn.click(prev_page, [page_index], [page_index], queue=False).then(
get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False
)
next_btn.click(next_page, [page_index, preview_count], [page_index], queue=False).then(
get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False
)
close_btn.click(lambda: gr.Column(visible=False), outputs=[preview_box], queue=False)
return dict(
data_preview_btn=data_preview_btn,
preview_count=preview_count,
page_index=page_index,
prev_btn=prev_btn,
next_btn=next_btn,
close_btn=close_btn,
preview_samples=preview_samples,
)
| LLaMA-Factory/src/llamafactory/webui/components/data.py/0 | {
"file_path": "LLaMA-Factory/src/llamafactory/webui/components/data.py",
"repo_id": "LLaMA-Factory",
"token_count": 1798
} | 13 |
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import pytest
from datasets import load_dataset
from transformers import AutoTokenizer
from llamafactory.data import get_dataset
from llamafactory.hparams import get_train_args
from llamafactory.model import load_tokenizer
TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
TRAIN_ARGS = {
"model_name_or_path": TINY_LLAMA,
"stage": "sft",
"do_train": True,
"finetuning_type": "full",
"dataset": "llamafactory/tiny-supervised-dataset",
"dataset_dir": "ONLINE",
"template": "llama3",
"cutoff_len": 8192,
"overwrite_cache": True,
"output_dir": "dummy_dir",
"overwrite_output_dir": True,
"fp16": True,
}
@pytest.mark.parametrize("num_samples", [16])
def test_supervised(num_samples: int):
model_args, data_args, training_args, _, _ = get_train_args(TRAIN_ARGS)
tokenizer_module = load_tokenizer(model_args)
tokenizer = tokenizer_module["tokenizer"]
tokenized_data = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module)
ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
original_data = load_dataset(TRAIN_ARGS["dataset"], split="train")
indexes = random.choices(range(len(original_data)), k=num_samples)
for index in indexes:
prompt = original_data[index]["instruction"]
if original_data[index]["input"]:
prompt += "\n" + original_data[index]["input"]
messages = [
{"role": "user", "content": prompt},
{"role": "assistant", "content": original_data[index]["output"]},
]
templated_result = ref_tokenizer.apply_chat_template(messages, tokenize=False)
decoded_result = tokenizer.decode(tokenized_data["input_ids"][index])
assert templated_result == decoded_result
| LLaMA-Factory/tests/data/test_supervised.py/0 | {
"file_path": "LLaMA-Factory/tests/data/test_supervised.py",
"repo_id": "LLaMA-Factory",
"token_count": 898
} | 14 |
- repo: https://github.com/PaddlePaddle/mirrors-yapf.git
sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37
hooks:
- id: yapf
files: \.py$
- repo: https://github.com/pre-commit/pre-commit-hooks
sha: a11d9314b22d8f8c7556443875b731ef05965464
hooks:
- id: check-merge-conflict
- id: check-symlinks
- id: detect-private-key
files: (?!.*paddle)^.*$
- id: end-of-file-fixer
files: \.(md|yml)$
- id: trailing-whitespace
files: \.(md|yml)$
- repo: https://github.com/Lucas-C/pre-commit-hooks
sha: v1.0.1
hooks:
- id: forbid-crlf
files: \.(md|yml)$
- id: remove-crlf
files: \.(md|yml)$
- id: forbid-tabs
files: \.(md|yml)$
- id: remove-tabs
files: \.(md|yml)$
- repo: local
hooks:
- id: clang-format-with-version-check
name: clang-format
description: Format files with ClangFormat.
entry: bash ./.travis/codestyle/clang_format.hook -i
language: system
files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$
- repo: local
hooks:
- id: cpplint-cpp-source
name: cpplint
description: Check C++ code style using cpplint.py.
entry: bash ./.travis/codestyle/cpplint_pre_commit.hook
language: system
files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx)$
| PaddleDetection/.pre-commit-config.yaml/0 | {
"file_path": "PaddleDetection/.pre-commit-config.yaml",
"repo_id": "PaddleDetection",
"token_count": 718
} | 15 |
#!/usr/bin/env bash
pip install -U pip Cython
pip install -r requirements.txt
mv ./dataset/coco/download_coco.py . && rm -rf ./dataset/coco/* && mv ./download_coco.py ./dataset/coco/
# prepare lite train data
wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/coco_benchmark.tar
cd ./dataset/coco/ && tar -xvf coco_benchmark.tar && mv -u coco_benchmark/* .
rm -rf coco_benchmark/
cd ../../
rm -rf ./dataset/mot/*
# prepare mot mini train data
wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/mot_benchmark.tar
cd ./dataset/mot/ && tar -xvf mot_benchmark.tar && mv -u mot_benchmark/* .
rm -rf mot_benchmark/
| PaddleDetection/benchmark/prepare.sh/0 | {
"file_path": "PaddleDetection/benchmark/prepare.sh",
"repo_id": "PaddleDetection",
"token_count": 270
} | 16 |
Simplified Chinese | [English](README.md)
# CenterNet (CenterNet: Objects as Points)
## Contents
- [Introduction](#introduction)
- [Model Zoo](#model-zoo)
- [Citation](#citation)
## Introduction
[CenterNet](http://arxiv.org/abs/1904.07850) is an anchor-free detector that models each object as a single point: the center of its bounding box. It locates this center with keypoint estimation and regresses the object's remaining properties (such as box size) from the features at that point. Because it is center-point based, CenterNet is end-to-end trainable and more efficient at detection than anchor-based detectors.
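To make the center-point formulation concrete, below is a minimal NumPy sketch of how such predictions can be decoded: heatmap cells are kept only if they are 3x3 local maxima above a score threshold, and a box is read off from an assumed width/height map at each surviving peak. The array shapes, stride, and threshold here are illustrative assumptions, not the decoding code shipped in PaddleDetection.
```python
import numpy as np

def decode_centers(heatmap, wh, stride=4, score_thresh=0.3):
    """heatmap: (H, W) class scores in [0, 1]; wh: (H, W, 2) box width/height per cell."""
    H, W = heatmap.shape
    padded = np.pad(heatmap, 1, mode="constant", constant_values=-np.inf)
    # 3x3 max filter built from shifted views; keeping only local maxima replaces NMS.
    windows = np.stack([padded[dy:dy + H, dx:dx + W]
                        for dy in range(3) for dx in range(3)], axis=0)
    is_peak = heatmap >= windows.max(axis=0)
    ys, xs = np.where(is_peak & (heatmap > score_thresh))
    boxes = []
    for y, x in zip(ys, xs):
        w, h = wh[y, x]
        cx, cy = x * stride, y * stride  # map the feature-map cell back to input pixels
        boxes.append((cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2, heatmap[y, x]))
    return boxes

# Toy check: one synthetic peak at cell (10, 20) with a 32x48 box.
hm = np.zeros((128, 128))
hm[10, 20] = 0.9
wh = np.ones((128, 128, 2)) * np.array([32.0, 48.0])
print(decode_centers(hm, wh))  # one box: (64, 16, 96, 64) with score 0.9
```
In the actual models listed below, the heatmap has one channel per class and the head also predicts a small center offset, but the idea of replacing NMS with a local-maximum check is the same.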
## Model Zoo
### CenterNet results on COCO-val 2017
| Backbone | Input Size | mAP | FPS | Download | Config |
| :--------------| :------- | :----: | :------: | :----: |:-----: |
| DLA-34 (paper) | 512x512 | 37.4 | - | - | - |
| DLA-34 | 512x512 | 37.6 | - | [Download](https://bj.bcebos.com/v1/paddledet/models/centernet_dla34_140e_coco.pdparams) | [Config](./centernet_dla34_140e_coco.yml) |
| ResNet50 + DLAUp | 512x512 | 38.9 | - | [Download](https://bj.bcebos.com/v1/paddledet/models/centernet_r50_140e_coco.pdparams) | [Config](./centernet_r50_140e_coco.yml) |
| MobileNetV1 + DLAUp | 512x512 | 28.2 | - | [Download](https://bj.bcebos.com/v1/paddledet/models/centernet_mbv1_140e_coco.pdparams) | [Config](./centernet_mbv1_140e_coco.yml) |
| MobileNetV3_small + DLAUp | 512x512 | 17 | - | [Download](https://bj.bcebos.com/v1/paddledet/models/centernet_mbv3_small_140e_coco.pdparams) | [Config](./centernet_mbv3_small_140e_coco.yml) |
| MobileNetV3_large + DLAUp | 512x512 | 27.1 | - | [Download](https://bj.bcebos.com/v1/paddledet/models/centernet_mbv3_large_140e_coco.pdparams) | [Config](./centernet_mbv3_large_140e_coco.yml) |
| ShuffleNetV2 + DLAUp | 512x512 | 23.8 | - | [Download](https://bj.bcebos.com/v1/paddledet/models/centernet_shufflenetv2_140e_coco.pdparams) | [Config](./centernet_shufflenetv2_140e_coco.yml) |
## Citation
```
@article{zhou2019objects,
title={Objects as points},
author={Zhou, Xingyi and Wang, Dequan and Kr{\"a}henb{\"u}hl, Philipp},
journal={arXiv preprint arXiv:1904.07850},
year={2019}
}
```
| PaddleDetection/configs/centernet/README_cn.md/0 | {
"file_path": "PaddleDetection/configs/centernet/README_cn.md",
"repo_id": "PaddleDetection",
"token_count": 1255
} | 17 |
_BASE_: [
'../datasets/culane.yml',
'_base_/clrnet_reader.yml',
'_base_/clrnet_r18_fpn.yml',
'_base_/optimizer_1x.yml',
'../runtime.yml'
]
weights: output/clr_resnet18_culane/model_final
| PaddleDetection/configs/clrnet/clrnet_resnet18_culane.yml/0 | {
"file_path": "PaddleDetection/configs/clrnet/clrnet_resnet18_culane.yml",
"repo_id": "PaddleDetection",
"token_count": 103
} | 18 |
metric: COCO
num_classes: 10
TrainDataset:
!COCODataSet
image_dir: VisDrone2019-DET-train
anno_path: train.json
dataset_dir: dataset/visdrone
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
EvalDataset:
!COCODataSet
image_dir: VisDrone2019-DET-val
anno_path: val.json
# image_dir: test_dev
# anno_path: test_dev.json
dataset_dir: dataset/visdrone
TestDataset:
!ImageFolder
anno_path: val.json
dataset_dir: dataset/visdrone
| PaddleDetection/configs/datasets/visdrone_detection.yml/0 | {
"file_path": "PaddleDetection/configs/datasets/visdrone_detection.yml",
"repo_id": "PaddleDetection",
"token_count": 220
} | 19 |
architecture: DETR
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vb_normal_pretrained.pdparams
hidden_dim: 256
use_focal_loss: True
DETR:
backbone: ResNet
transformer: DeformableTransformer
detr_head: DeformableDETRHead
post_process: DETRPostProcess
ResNet:
# index 0 stands for res2
depth: 50
norm_type: bn
freeze_at: 0
return_idx: [1, 2, 3]
lr_mult_list: [0.0, 0.1, 0.1, 0.1]
num_stages: 4
DeformableTransformer:
num_queries: 300
position_embed_type: sine
nhead: 8
num_encoder_layers: 6
num_decoder_layers: 6
dim_feedforward: 1024
dropout: 0.1
activation: relu
num_feature_levels: 4
num_encoder_points: 4
num_decoder_points: 4
DeformableDETRHead:
num_mlp_layers: 3
DETRLoss:
loss_coeff: {class: 2, bbox: 5, giou: 2}
aux_loss: True
HungarianMatcher:
matcher_coeff: {class: 2, bbox: 5, giou: 2}
| PaddleDetection/configs/deformable_detr/_base_/deformable_detr_r50.yml/0 | {
"file_path": "PaddleDetection/configs/deformable_detr/_base_/deformable_detr_r50.yml",
"repo_id": "PaddleDetection",
"token_count": 383
} | 20 |
architecture: FasterRCNN
# pretrain_weights: # rewrite in SwinTransformer.pretrained in ppdet/modeling/backbones/swin_transformer.py
FasterRCNN:
backbone: SwinTransformer
neck: FPN
rpn_head: RPNHead
bbox_head: BBoxHead
bbox_post_process: BBoxPostProcess
SwinTransformer:
arch: 'swin_T_224'
ape: false
drop_path_rate: 0.1
patch_norm: true
out_indices: [0, 1, 2, 3]
pretrained: https://paddledet.bj.bcebos.com/models/pretrained/swin_tiny_patch4_window7_224_22kto1k_pretrained.pdparams
FPN:
out_channel: 256
RPNHead:
anchor_generator:
aspect_ratios: [0.5, 1.0, 2.0]
anchor_sizes: [[32], [64], [128], [256], [512]]
strides: [4, 8, 16, 32, 64]
rpn_target_assign:
batch_size_per_im: 256
fg_fraction: 0.5
negative_overlap: 0.3
positive_overlap: 0.7
use_random: True
train_proposal:
min_size: 0.0
nms_thresh: 0.7
pre_nms_top_n: 2000
post_nms_top_n: 1000
topk_after_collect: True
test_proposal:
min_size: 0.0
nms_thresh: 0.7
pre_nms_top_n: 1000
post_nms_top_n: 1000
BBoxHead:
head: TwoFCHead
roi_extractor:
resolution: 7
sampling_ratio: 0
aligned: True
bbox_assigner: BBoxAssigner
BBoxAssigner:
batch_size_per_im: 512
bg_thresh: 0.5
fg_thresh: 0.5
fg_fraction: 0.25
use_random: True
TwoFCHead:
out_channel: 1024
BBoxPostProcess:
decode: RCNNBox
nms:
name: MultiClassNMS
keep_top_k: 100
score_threshold: 0.05
nms_threshold: 0.5
| PaddleDetection/configs/faster_rcnn/_base_/faster_rcnn_swin_tiny_fpn.yml/0 | {
"file_path": "PaddleDetection/configs/faster_rcnn/_base_/faster_rcnn_swin_tiny_fpn.yml",
"repo_id": "PaddleDetection",
"token_count": 688
} | 21 |
_BASE_: [
'faster_rcnn_r50_fpn_1x_coco.yml',
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_pretrained.pdparams
weights: output/faster_rcnn_r50_vd_fpn_1x_coco/model_final
ResNet:
# index 0 stands for res2
depth: 50
variant: d
norm_type: bn
freeze_at: 0
return_idx: [0,1,2,3]
num_stages: 4
| PaddleDetection/configs/faster_rcnn/faster_rcnn_r50_vd_fpn_1x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/faster_rcnn/faster_rcnn_r50_vd_fpn_1x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 161
} | 22 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'_base_/fcos_r50_fpn.yml',
'_base_/optimizer_1x.yml',
'_base_/fcos_reader.yml',
]
weights: output/fcos_r50_fpn_multiscale_2x_coco/model_final
TrainReader:
sample_transforms:
- Decode: {}
- RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], keep_ratio: True, interp: 1}
- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
- RandomFlip: {}
batch_transforms:
- Permute: {}
- PadBatch: {pad_to_stride: 128}
- Gt2FCOSTarget:
object_sizes_boundary: [64, 128, 256, 512]
center_sampling_radius: 1.5
downsample_ratios: [8, 16, 32, 64, 128]
norm_reg_targets: True
batch_size: 2
shuffle: True
drop_last: True
use_shared_memory: True
epoch: 24
LearningRate:
base_lr: 0.01
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [16, 22]
- !LinearWarmup
start_factor: 0.3333333333333333
steps: 500
| PaddleDetection/configs/fcos/fcos_r50_fpn_multiscale_2x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/fcos/fcos_r50_fpn_multiscale_2x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 477
} | 23 |
worker_num: 2
TrainReader:
sample_transforms:
- Decode: {}
- RandomFlip: {prob: 0.5}
- Resize: {target_size: [800, 1333], keep_ratio: true, interp: 1}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
- Gt2GFLTarget:
downsample_ratios: [8, 16, 32, 64, 128]
grid_cell_scale: 8
batch_size: 2
shuffle: true
drop_last: true
use_shared_memory: True
EvalReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 1, target_size: [800, 1333], keep_ratio: True}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
batch_size: 2
shuffle: false
TestReader:
sample_transforms:
- Decode: {}
- Resize: {interp: 1, target_size: [800, 1333], keep_ratio: True}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
batch_size: 1
shuffle: false
| PaddleDetection/configs/gfl/_base_/gfl_reader.yml/0 | {
"file_path": "PaddleDetection/configs/gfl/_base_/gfl_reader.yml",
"repo_id": "PaddleDetection",
"token_count": 491
} | 24 |
use_gpu: true
log_iter: 5
save_dir: output
snapshot_epoch: 10
weights: output/hrnet_w32_256x192/model_final
epoch: 210
num_joints: &num_joints 17
pixel_std: &pixel_std 200
metric: KeyPointTopDownCOCOEval
num_classes: 1
train_height: &train_height 256
train_width: &train_width 192
trainsize: &trainsize [*train_width, *train_height]
hmsize: &hmsize [48, 64]
flip_perm: &flip_perm [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
#####model
architecture: TopDownHRNet
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Trunc_HRNet_W32_C_pretrained.pdparams
TopDownHRNet:
backbone: HRNet
post_process: HRNetPostProcess
flip_perm: *flip_perm
num_joints: *num_joints
width: &width 32
loss: KeyPointMSELoss
HRNet:
width: *width
freeze_at: -1
freeze_norm: false
return_idx: [0]
KeyPointMSELoss:
use_target_weight: true
#####optimizer
LearningRate:
base_lr: 0.0005
schedulers:
- !PiecewiseDecay
milestones: [170, 200]
gamma: 0.1
- !LinearWarmup
start_factor: 0.001
steps: 1000
OptimizerBuilder:
optimizer:
type: Adam
regularizer:
factor: 0.0
type: L2
#####data
TrainDataset:
!KeypointTopDownCocoDataset
image_dir: train2017
anno_path: annotations/person_keypoints_train2017.json
dataset_dir: dataset/coco
num_joints: *num_joints
trainsize: *trainsize
pixel_std: *pixel_std
use_gt_bbox: True
EvalDataset:
!KeypointTopDownCocoDataset
image_dir: val2017
anno_path: annotations/person_keypoints_val2017.json
dataset_dir: dataset/coco
bbox_file: bbox.json
num_joints: *num_joints
trainsize: *trainsize
pixel_std: *pixel_std
use_gt_bbox: True
image_thre: 0.0
TestDataset:
!ImageFolder
anno_path: dataset/coco/keypoint_imagelist.txt
worker_num: 2
global_mean: &global_mean [0.485, 0.456, 0.406]
global_std: &global_std [0.229, 0.224, 0.225]
TrainReader:
sample_transforms:
- RandomFlipHalfBodyTransform:
scale: 0.5
rot: 40
num_joints_half_body: 8
prob_half_body: 0.3
pixel_std: *pixel_std
trainsize: *trainsize
upper_body_ids: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
flip_pairs: *flip_perm
- TopDownAffine:
trainsize: *trainsize
- ToHeatmapsTopDown_DARK:
hmsize: *hmsize
sigma: 2
batch_transforms:
- NormalizeImage:
mean: *global_mean
std: *global_std
is_scale: true
- Permute: {}
batch_size: 64
shuffle: true
drop_last: false
EvalReader:
sample_transforms:
- TopDownAffine:
trainsize: *trainsize
batch_transforms:
- NormalizeImage:
mean: *global_mean
std: *global_std
is_scale: true
- Permute: {}
batch_size: 16
TestReader:
inputs_def:
image_shape: [3, *train_height, *train_width]
sample_transforms:
- Decode: {}
- TopDownEvalAffine:
trainsize: *trainsize
- NormalizeImage:
mean: *global_mean
std: *global_std
is_scale: true
- Permute: {}
batch_size: 1
| PaddleDetection/configs/keypoint/hrnet/dark_hrnet_w32_256x192.yml/0 | {
"file_path": "PaddleDetection/configs/keypoint/hrnet/dark_hrnet_w32_256x192.yml",
"repo_id": "PaddleDetection",
"token_count": 1405
} | 25 |
_BASE_: [
'mask_rcnn_r50_fpn_1x_coco.yml',
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_pretrained.pdparams
weights: output/mask_rcnn_r50_vd_fpn_2x_coco/model_final
ResNet:
# index 0 stands for res2
depth: 50
variant: d
norm_type: bn
freeze_at: 0
return_idx: [0,1,2,3]
num_stages: 4
epoch: 24
LearningRate:
base_lr: 0.01
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [16, 22]
- !LinearWarmup
start_factor: 0.3333333333333333
steps: 500
| PaddleDetection/configs/mask_rcnn/mask_rcnn_r50_vd_fpn_2x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/mask_rcnn/mask_rcnn_r50_vd_fpn_2x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 244
} | 26 |
metric: COCO
num_classes: 1
# Detection Dataset for training
TrainDataset:
!COCODataSet
dataset_dir: dataset/mot/MOT17
anno_path: annotations/train_half.json
image_dir: images/train
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
EvalDataset:
!COCODataSet
dataset_dir: dataset/mot/MOT17
anno_path: annotations/val_half.json
image_dir: images/train
TestDataset:
!ImageFolder
dataset_dir: dataset/mot/MOT17
anno_path: annotations/val_half.json
# MOTDataset for MOT evaluation and inference
EvalMOTDataset:
!MOTImageFolder
dataset_dir: dataset/mot
data_root: MOT17/images/half
keep_ori_im: True # set as True in DeepSORT and ByteTrack
TestMOTDataset:
!MOTImageFolder
dataset_dir: dataset/mot
keep_ori_im: True # set True if save visualization images or video
| PaddleDetection/configs/mot/bytetrack/_base_/mot17.yml/0 | {
"file_path": "PaddleDetection/configs/mot/bytetrack/_base_/mot17.yml",
"repo_id": "PaddleDetection",
"token_count": 329
} | 27 |
_BASE_: [
'detector/ppyolov2_r50vd_dcn_365e_640x640_mot17half.yml',
'_base_/mot17.yml',
'_base_/deepsort_reader_1088x608.yml',
]
metric: MOT
num_classes: 1
EvalMOTDataset:
!MOTImageFolder
dataset_dir: dataset/mot
data_root: MOT17/images/half
keep_ori_im: True # set as True in DeepSORT
det_weights: https://paddledet.bj.bcebos.com/models/mot/deepsort/ppyolov2_r50vd_dcn_365e_640x640_mot17half.pdparams
reid_weights: https://paddledet.bj.bcebos.com/models/mot/deepsort/deepsort_pplcnet.pdparams
# reader
EvalMOTReader:
sample_transforms:
- Decode: {}
- Resize: {target_size: [640, 640], keep_ratio: False, interp: 2}
- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
- Permute: {}
batch_size: 1
TestMOTReader:
inputs_def:
image_shape: [3, 640, 640]
sample_transforms:
- Decode: {}
- Resize: {target_size: [640, 640], keep_ratio: False, interp: 2}
- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
- Permute: {}
batch_size: 1
# DeepSORT configuration
architecture: DeepSORT
pretrain_weights: None
DeepSORT:
detector: YOLOv3 # PPYOLOv2 version
reid: PPLCNetEmbedding
tracker: DeepSORTTracker
# reid and tracker configuration
# see 'configs/mot/deepsort/reid/deepsort_pplcnet.yml'
PPLCNetEmbedding:
input_ch: 1280
output_ch: 512
DeepSORTTracker:
input_size: [64, 192]
min_box_area: 0
vertical_ratio: -1
budget: 100
max_age: 70
n_init: 3
metric_type: cosine
matching_threshold: 0.2
max_iou_distance: 0.9
motion: KalmanFilter
# detector configuration: PPYOLOv2 version
# see 'configs/mot/deepsort/detector/ppyolov2_r50vd_dcn_365e_640x640_mot17half.yml'
YOLOv3:
backbone: ResNet
neck: PPYOLOPAN
yolo_head: YOLOv3Head
post_process: BBoxPostProcess
ResNet:
depth: 50
variant: d
return_idx: [1, 2, 3]
dcn_v2_stages: [3]
freeze_at: -1
freeze_norm: false
norm_decay: 0.
# Tracking requires higher quality boxes, so NMS score_threshold will be higher
BBoxPostProcess:
decode:
name: YOLOBox
conf_thresh: 0.25 # 0.01 in original detector
downsample_ratio: 32
clip_bbox: true
scale_x_y: 1.05
nms:
name: MatrixNMS
keep_top_k: 100
score_threshold: 0.4 # 0.01 in original detector
post_threshold: 0.4 # 0.01 in original detector
nms_top_k: -1
background_label: -1
| PaddleDetection/configs/mot/deepsort/deepsort_ppyolov2_pplcnet.yml/0 | {
"file_path": "PaddleDetection/configs/mot/deepsort/deepsort_ppyolov2_pplcnet.yml",
"repo_id": "PaddleDetection",
"token_count": 1048
} | 28 |
architecture: FairMOT
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/fairmot_dla34_crowdhuman_pretrained.pdparams
for_mot: True
FairMOT:
detector: CenterNet
reid: FairMOTEmbeddingHead
loss: FairMOTLoss
tracker: JDETracker
CenterNet:
backbone: DLA
neck: CenterNetDLAFPN
head: CenterNetHead
post_process: CenterNetPostProcess
CenterNetDLAFPN:
down_ratio: 4
last_level: 5
out_channel: 0
dcn_v2: True
with_sge: False
CenterNetHead:
head_planes: 256
prior_bias: -2.19
regress_ltrb: True
size_loss: 'L1'
loss_weight: {'heatmap': 1.0, 'size': 0.1, 'offset': 1.0, 'iou': 0.0}
add_iou: False
FairMOTEmbeddingHead:
ch_head: 256
ch_emb: 128
CenterNetPostProcess:
max_per_img: 500
down_ratio: 4
regress_ltrb: True
JDETracker:
conf_thres: 0.4
tracked_thresh: 0.4
metric_type: cosine
min_box_area: 200
vertical_ratio: 1.6 # for pedestrian
| PaddleDetection/configs/mot/fairmot/_base_/fairmot_dla34.yml/0 | {
"file_path": "PaddleDetection/configs/mot/fairmot/_base_/fairmot_dla34.yml",
"repo_id": "PaddleDetection",
"token_count": 385
} | 29 |
English | [简体中文](README_cn.md)
# MCFairMOT (Multi-class FairMOT)
## Table of Contents
- [Introduction](#Introduction)
- [Model Zoo](#Model_Zoo)
- [Getting Started](#Getting_Started)
- [Citations](#Citations)
## Introduction
MCFairMOT is the Multi-class extended version of [FairMOT](https://arxiv.org/abs/2004.01888).
### PP-Tracking real-time MOT system
In addition, PaddleDetection also provides the [PP-Tracking](../../../deploy/pptracking/README.md) real-time multi-object tracking system.
PP-Tracking is the first open-source real-time multi-object tracking system based on the PaddlePaddle deep learning framework. It offers rich models, broad applications and efficient deployment.
PP-Tracking supports two paradigms: single-camera tracking (MOT) and multi-camera tracking (MTMCT). Targeting the difficulties and pain points of real business scenarios, PP-Tracking provides various MOT functions and applications such as pedestrian tracking, vehicle tracking, multi-class tracking, small-object tracking, traffic statistics and multi-camera tracking. Deployment is supported through both an API and a GUI visual interface, the deployment languages include Python and C++, and the supported platforms include Linux, NVIDIA Jetson, etc.
### AI studio public project tutorial
PP-Tracking provides an AI Studio public project tutorial. Please refer to this [tutorial](https://aistudio.baidu.com/aistudio/projectdetail/3022582).
## Model Zoo
### MCFairMOT Results on VisDrone2019 Val Set
| backbone | input shape | MOTA | IDF1 | IDS | FPS | download | config |
| :--------------| :------- | :----: | :----: | :---: | :------: | :----: |:----: |
| DLA-34 | 1088x608 | 24.3 | 41.6 | 2314 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_dla34_30e_1088x608_visdrone.pdparams) | [config](./mcfairmot_dla34_30e_1088x608_visdrone.yml) |
| HRNetV2-W18 | 1088x608 | 20.4 | 39.9 | 2603 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone.yml) |
| HRNetV2-W18 | 864x480 | 18.2 | 38.7 | 2416 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone.yml) |
| HRNetV2-W18 | 576x320 | 12.0 | 33.8 | 2178 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone.yml) |
**Notes:**
- MOTA is the average MOTA of 10 categories in the VisDrone2019 MOT dataset, and its value is also equal to the average MOTA of all the evaluated video sequences. Here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot.zip) of the dataset.
- MCFairMOT used 4 GPUs for training 30 epochs. The batch size is 6 on each GPU for MCFairMOT DLA-34, and 8 for MCFairMOT HRNetV2-W18.
### MCFairMOT Results on VisDrone Vehicle Val Set
| backbone | input shape | MOTA | IDF1 | IDS | FPS | download | config |
| :--------------| :------- | :----: | :----: | :---: | :------: | :----: |:----: |
| DLA-34 | 1088x608 | 37.7 | 56.8 | 199 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_dla34_30e_1088x608_visdrone_vehicle_bytetracker.pdparams) | [config](./mcfairmot_dla34_30e_1088x608_visdrone_vehicle_bytetracker.yml) |
| HRNetV2-W18 | 1088x608 | 35.6 | 56.3 | 190 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml) |
**Notes:**
- MOTA is the average MOTA of 4 categories in the VisDrone Vehicle dataset, and this dataset is extracted from the VisDrone2019 MOT dataset, here we provide the download [link](https://bj.bcebos.com/v1/paddledet/data/mot/visdrone_mcmot_vehicle.zip).
- The tracker used in MCFairMOT model here is ByteTracker.
### MCFairMOT off-line quantization results on VisDrone Vehicle val-set
| Model | Compression Strategy | Prediction Delay(T4) |Prediction Delay(V100)| Model Configuration File |Compression Algorithm Configuration File |
| :--------------| :------- | :------: | :----: | :----: | :----: |
| DLA-34 | baseline | 41.3 | 21.9 |[Configuration File](./mcfairmot_dla34_30e_1088x608_visdrone_vehicle_bytetracker.yml)| - |
| DLA-34 | off-line quantization | 37.8 | 21.2 |[Configuration File](./mcfairmot_dla34_30e_1088x608_visdrone_vehicle_bytetracker.yml)|[Configuration File](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/slim/post_quant/mcfairmot_ptq.yml)|
## Getting Started
### 1. Training
Train MCFairMOT on 4 GPUs with the following command:
```bash
python -m paddle.distributed.launch --log_dir=./mcfairmot_dla34_30e_1088x608_visdrone/ --gpus 0,1,2,3 tools/train.py -c configs/mot/mcfairmot/mcfairmot_dla34_30e_1088x608_visdrone.yml
```
### 2. Evaluation
Evaluate the tracking performance of MCFairMOT on the val dataset on a single GPU with the following commands:
```bash
# use weights released in PaddleDetection model zoo
CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/mcfairmot/mcfairmot_dla34_30e_1088x608_visdrone.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/mcfairmot_dla34_30e_1088x608_visdrone.pdparams
# use saved checkpoint in training
CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/mcfairmot/mcfairmot_dla34_30e_1088x608_visdrone.yml -o weights=output/mcfairmot_dla34_30e_1088x608_visdrone/model_final.pdparams
```
**Notes:**
- The default evaluation dataset is VisDrone2019 MOT val-set. If you want to change the evaluation dataset, please refer to the following code and modify `configs/datasets/mcmot.yml`:
```
EvalMOTDataset:
!MOTImageFolder
dataset_dir: dataset/mot
data_root: your_dataset/images/val
keep_ori_im: False # set True if save visualization images or video
```
- Tracking results will be saved in `{output_dir}/mot_results/`, and every sequence has one txt file, each line of the txt file is `frame,id,x1,y1,w,h,score,cls_id,-1,-1`, and you can set `{output_dir}` by `--output_dir`.
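For reference, result files in this format can be loaded with a few lines of Python. The snippet below is only a minimal sketch for post-processing the saved txt files; the file path and grouping logic are illustrative and not part of the official tools:
```python
from collections import defaultdict

def load_mot_results(txt_path):
    """Parse result lines of the form frame,id,x1,y1,w,h,score,cls_id,-1,-1."""
    results = defaultdict(list)  # frame -> list of (track_id, (x1, y1, w, h), score, cls_id)
    with open(txt_path) as f:
        for line in f:
            vals = line.strip().split(',')
            if len(vals) < 8:
                continue
            frame, track_id = int(vals[0]), int(float(vals[1]))
            x1, y1, w, h, score = map(float, vals[2:7])
            cls_id = int(float(vals[7]))
            results[frame].append((track_id, (x1, y1, w, h), score, cls_id))
    return results

# hypothetical sequence name; one txt file is saved per sequence
results = load_mot_results('output/mot_results/uav0000249_00001_v.txt')
print(sum(len(v) for v in results.values()), 'detections loaded')
```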
### 3. Inference
Run inference on a video on a single GPU with the following command:
```bash
# inference on video and save a video
CUDA_VISIBLE_DEVICES=0 python tools/infer_mot.py -c configs/mot/mcfairmot/mcfairmot_dla34_30e_1088x608_visdrone.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/mcfairmot_dla34_30e_1088x608_visdrone.pdparams --video_file={your video name}.mp4 --save_videos
```
**Notes:**
- Please make sure that [ffmpeg](https://ffmpeg.org/ffmpeg.html) is installed first, on Linux(Ubuntu) platform you can directly install it by the following command:`apt-get update && apt-get install -y ffmpeg`.
### 4. Export model
```bash
CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/mot/mcfairmot/mcfairmot_dla34_30e_1088x608_visdrone.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/mcfairmot_dla34_30e_1088x608_visdrone.pdparams
```
### 5. Using exported model for python inference
```bash
python deploy/pptracking/python/mot_jde_infer.py --model_dir=output_inference/mcfairmot_dla34_30e_1088x608_visdrone --video_file={your video name}.mp4 --device=GPU --save_mot_txts
```
**Notes:**
- The tracking model is used to predict the video, and does not support the prediction of a single image. The visualization video of the tracking results is saved by default. You can add `--save_mot_txts` to save the txt result file, or `--save_images` to save the visualization images.
- Each line of the tracking results txt file is `frame,id,x1,y1,w,h,score,cls_id,-1,-1`.
### 6. Off-line quantization
The offline quantization model is calibrated on the VisDrone Vehicle val-set. Run it as follows:
```bash
CUDA_VISIBLE_DEVICES=0 python3.7 tools/post_quant.py -c configs/mot/mcfairmot/mcfairmot_dla34_30e_1088x608_visdrone_vehicle_bytetracker.yml --slim_config=configs/slim/post_quant/mcfairmot_ptq.yml
```
**Notes:**
- Offline quantization uses the VisDrone Vehicle val-set dataset and a 4-class vehicle tracking model by default.
## Citations
```
@article{zhang2020fair,
title={FairMOT: On the Fairness of Detection and Re-Identification in Multiple Object Tracking},
author={Zhang, Yifu and Wang, Chunyu and Wang, Xinggang and Zeng, Wenjun and Liu, Wenyu},
journal={arXiv preprint arXiv:2004.01888},
year={2020}
}
@ARTICLE{9573394,
author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Fan, Heng and Hu, Qinghua and Ling, Haibin},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Detection and Tracking Meet Drones Challenge},
year={2021},
volume={},
number={},
pages={1-1},
doi={10.1109/TPAMI.2021.3119563}
}
@article{zhang2021bytetrack,
title={ByteTrack: Multi-Object Tracking by Associating Every Detection Box},
author={Zhang, Yifu and Sun, Peize and Jiang, Yi and Yu, Dongdong and Yuan, Zehuan and Luo, Ping and Liu, Wenyu and Wang, Xinggang},
journal={arXiv preprint arXiv:2110.06864},
year={2021}
}
```
| PaddleDetection/configs/mot/mcfairmot/README.md/0 | {
"file_path": "PaddleDetection/configs/mot/mcfairmot/README.md",
"repo_id": "PaddleDetection",
"token_count": 3497
} | 30 |
[English](README.md) | Simplified Chinese
# Featured Vertical-Domain Tracking Models
## Large-scale Pedestrian Tracking
One of the main applications of pedestrian tracking is traffic surveillance.
[PathTrack](https://www.trace.ethz.ch/publications/2017/pathtrack/index.html) contains 720 video sequences with more than 15,000 pedestrian trajectories. It covers a wide variety of scenes such as street views, dancing, sports and interviews, most of which are captured by moving cameras. The dataset has only one annotated category, Pedestrian, for the tracking task.
[VisDrone](http://aiskyeye.com) is a dataset captured from a drone's perspective, mainly in a bird's-eye view. It covers different locations (taken from 14 different cities in China that are thousands of kilometers apart), different environments (urban and rural), different objects (pedestrians, vehicles, bicycles, etc.) and different densities (sparse and crowded scenes). [VisDrone2019-MOT](https://github.com/VisDrone/VisDrone-Dataset) contains 56 video sequences for training and 7 video sequences for validation. Here the VisDrone2019-MOT multi-object tracking dataset is processed by extracting the pedestrian and people categories and merging them into one large Pedestrian category.
## Model Zoo
### FairMOT Results for the Pedestrian Category on the val-set of Each Dataset
| Dataset | Backbone | Input Size | MOTA | IDF1 | FPS | Download | Config |
| :-------------| :-------- | :------- | :----: | :----: | :----: | :-----: |:------: |
| PathTrack | DLA-34 | 1088x608 | 44.9 | 59.3 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_pathtrack.pdparams) | [config](./fairmot_dla34_30e_1088x608_pathtrack.yml) |
| VisDrone | DLA-34 | 1088x608 | 49.2 | 63.1 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_visdrone_pedestrian.pdparams) | [config](./fairmot_dla34_30e_1088x608_visdrone_pedestrian.yml) |
| VisDrone | HRNetv2-W18| 1088x608 | 40.5 | 54.7 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_pedestrian.pdparams) | [config](./fairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_pedestrian.yml) |
| VisDrone | HRNetv2-W18| 864x480 | 38.6 | 50.9 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_pedestrian.pdparams) | [config](./fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_pedestrian.yml) |
| VisDrone | HRNetv2-W18| 576x320 | 30.6 | 47.2 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone_pedestrian.pdparams) | [config](./fairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone_pedestrian.yml) |
**Notes:**
- FairMOT uses DLA-34 as the backbone and is trained on 4 GPUs for 30 epochs with a batch size of 6 on each GPU.
## Dataset Preparation and Processing
### 1. Dataset processing scripts
All the processing scripts are under the tools directory
```
# visdrone
tools/visdrone/visdrone2mot.py: generate the visdrone_pedestrian dataset
```
### 2. visdrone_pedestrian dataset processing
```
# copy tools/visdrone/visdrone2mot.py to the dataset directory
# generate visdrone_pedestrian data in MOT format, extracting classes=1,2 (pedestrian, people)
<<-- directory before generation -->>
├── VisDrone2019-MOT-val
│ ├── annotations
│ ├── sequences
│ ├── visdrone2mot.py
<<-- directory after generation -->>
├── VisDrone2019-MOT-val
│ ├── annotations
│ ├── sequences
│ ├── visdrone2mot.py
│ ├── visdrone_pedestrian
│ │ ├── images
│ │ │ ├── train
│ │ │ ├── val
│ │ ├── labels_with_ids
│ │ │ ├── train
│ │ │ ├── val
# run
python visdrone2mot.py --transMot=True --data_name=visdrone_pedestrian --phase=val
python visdrone2mot.py --transMot=True --data_name=visdrone_pedestrian --phase=train
```
## Getting Started
### 1. Training
Start training on 2 GPUs with the following command
```bash
python -m paddle.distributed.launch --log_dir=./fairmot_dla34_30e_1088x608_visdrone_pedestrian/ --gpus 0,1 tools/train.py -c configs/mot/pedestrian/fairmot_dla34_30e_1088x608_visdrone_pedestrian.yml
```
### 2. Evaluation
Evaluate on a single GPU with the following command
```bash
# use weights released by PaddleDetection
CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/pedestrian/fairmot_dla34_30e_1088x608_visdrone_pedestrian.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_visdrone_pedestrian.pdparams
# use the checkpoint saved during training
CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/pedestrian/fairmot_dla34_30e_1088x608_visdrone_pedestrian.yml -o weights=output/fairmot_dla34_30e_1088x608_visdrone_pedestrian/model_final.pdparams
```
### 3. Inference
Predict a video on a single GPU and save the result as a video with the following command
```bash
# predict a video
CUDA_VISIBLE_DEVICES=0 python tools/infer_mot.py -c configs/mot/pedestrian/fairmot_dla34_30e_1088x608_visdrone_pedestrian.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_visdrone_pedestrian.pdparams --video_file={your video name}.mp4 --save_videos
```
**Notes:**
- Please make sure that [ffmpeg](https://ffmpeg.org/ffmpeg.html) is installed first. On the Linux (Ubuntu) platform you can install it directly with the following command: `apt-get update && apt-get install -y ffmpeg`.
### 4. Export model
```bash
CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/mot/pedestrian/fairmot_dla34_30e_1088x608_visdrone_pedestrian.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_visdrone_pedestrian.pdparams
```
### 5. Python inference with the exported model
```bash
python deploy/pptracking/python/mot_jde_infer.py --model_dir=output_inference/fairmot_dla34_30e_1088x608_visdrone_pedestrian --video_file={your video name}.mp4 --device=GPU --save_mot_txts
```
**Notes:**
- The tracking model predicts on videos and does not support single-image prediction. The visualization video of the tracking results is saved by default. You can add `--save_mot_txts` to save the txt result files, or `--save_images` to save the visualization images.
- Each line of the tracking results txt file is `frame,id,x1,y1,w,h,score,-1,-1,-1`.
## Citations
```
@article{zhang2020fair,
title={FairMOT: On the Fairness of Detection and Re-Identification in Multiple Object Tracking},
author={Zhang, Yifu and Wang, Chunyu and Wang, Xinggang and Zeng, Wenjun and Liu, Wenyu},
journal={arXiv preprint arXiv:2004.01888},
year={2020}
}
@INPROCEEDINGS{8237302,
author={S. {Manen} and M. {Gygli} and D. {Dai} and L. V. {Gool}},
booktitle={2017 IEEE International Conference on Computer Vision (ICCV)},
title={PathTrack: Fast Trajectory Annotation with Path Supervision},
year={2017},
volume={},
number={},
pages={290-299},
doi={10.1109/ICCV.2017.40},
ISSN={2380-7504},
month={Oct},}
@ARTICLE{9573394,
author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Fan, Heng and Hu, Qinghua and Ling, Haibin},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
title={Detection and Tracking Meet Drones Challenge},
year={2021},
volume={},
number={},
pages={1-1},
doi={10.1109/TPAMI.2021.3119563}
}
```
| PaddleDetection/configs/mot/pedestrian/README.md/0 | {
"file_path": "PaddleDetection/configs/mot/pedestrian/README.md",
"repo_id": "PaddleDetection",
"token_count": 3765
} | 31 |
_BASE_: [
'../../../datasets/coco_detection.yml',
'../../../runtime.yml',
'../_base_/picodet_esnet.yml',
'./optimizer_300e_pruner.yml',
'../_base_/picodet_320_reader.yml',
]
weights: output/picodet_m_320_coco/model_final
find_unused_parameters: True
use_ema: true
cycle_epoch: 40
snapshot_epoch: 10
| PaddleDetection/configs/picodet/legacy_model/pruner/picodet_m_320_coco_pruner.yml/0 | {
"file_path": "PaddleDetection/configs/picodet/legacy_model/pruner/picodet_m_320_coco_pruner.yml",
"repo_id": "PaddleDetection",
"token_count": 143
} | 32 |
Simplified Chinese | [English](README.md)
# PP-YOLOE Human Detection Models
The PaddleDetection team provides PP-YOLOE based detection models for pedestrians, which users can download and use directly. The models used in PP-Human are trained on a business (in-house) dataset; we also provide CrowdHuman training configs so that the models can be trained with open-source data.
The CrowdHuman dataset organized in COCO format can be downloaded [here](https://bj.bcebos.com/v1/paddledet/data/crowdhuman.zip); it contains only one detection category, `pedestrian(1)`. The original dataset can be downloaded from [this link](http://www.crowdhuman.org/download.html).
The deployment models are all used in the [PP-Human](../../deploy/pipeline/) project.
| Model | Dataset | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Download | Config |
|:---------|:-------:|:------:|:------:| :----: | :------:|
|PP-YOLOE-s| CrowdHuman | 42.5 | 77.9 | [download](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_36e_crowdhuman.pdparams) | [config](./ppyoloe_crn_s_36e_crowdhuman.yml) |
|PP-YOLOE-l| CrowdHuman | 48.0 | 81.9 | [download](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_36e_crowdhuman.pdparams) | [config](./ppyoloe_crn_l_36e_crowdhuman.yml) |
|PP-YOLOE-s| Business dataset | 53.2 | - | [download](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_36e_pphuman.pdparams) | [config](./ppyoloe_crn_s_36e_pphuman.yml) |
|PP-YOLOE-l| Business dataset | 57.8 | - | [download](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_36e_pphuman.pdparams) | [config](./ppyoloe_crn_l_36e_pphuman.yml) |
|PP-YOLOE+_t-aux(320)| Business dataset | 45.7 | 81.2 | [download](https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_t_auxhead_320_60e_pphuman.pdparams) | [config](./ppyoloe_plus_crn_t_auxhead_320_60e_pphuman.yml) |
**Notes:**
- The PP-YOLOE models are trained on 8 GPUs with mixed precision. If the **number of GPUs** or the **batch size** changes, you should adjust the learning rate according to the formula **lr<sub>new</sub> = lr<sub>default</sub> * (batch_size<sub>new</sub> * GPU_number<sub>new</sub>) / (batch_size<sub>default</sub> * GPU_number<sub>default</sub>)**.
- For detailed usage, please refer to [ppyoloe](../ppyoloe#getting-start).
# YOLOv3 Human Detection Model
Please refer to the [Human_YOLOv3 page](./pedestrian_yolov3/README_cn.md)
# PP-YOLOE Cigarette Detection Model
The cigarette detection model based on PP-YOLOE is one part of the detection-based action recognition solution in PP-Human. For how to use this model for smoking recognition in PP-Human, please refer to the [PP-Human action recognition module](../../deploy/pipeline/docs/tutorials/pphuman_action.md). The model detects only one category, cigarette. Due to data source restrictions, the training data cannot be released publicly for now. To improve detection accuracy, the model uses weights trained on the small-object dataset VisDrone (see [visdrone](../visdrone)) as the pretrained model.
| Model | Dataset | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Download | Config |
|:---------|:-------:|:------:|:------:| :----: | :------:|
| PP-YOLOE-s | Cigarette business dataset | 39.7 | 79.5 |[download](https://bj.bcebos.com/v1/paddledet/models/pipeline/ppyoloe_crn_s_80e_smoking_visdrone.pdparams) | [config](./ppyoloe_crn_s_80e_smoking_visdrone.yml) |
# PP-HGNet Phone-Calling Recognition Model
Phone-calling action recognition is implemented with the PP-HGNet model; see the [PP-Human action recognition module](../../deploy/pipeline/docs/tutorials/pphuman_action.md) for details. The model is trained with the [PaddleClas](https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/PP-HGNet.md#3.3) suite. The inference model can be downloaded here:
| Model | Dataset | Acc | Download | Config |
|:---------|:-------:|:------:| :----: | :------:|
| PP-HGNet | Business dataset | 86.85 |[download](https://bj.bcebos.com/v1/paddledet/models/pipeline/PPHGNet_tiny_calling_halfbody.zip) | - |
# HRNet Human Keypoint Model
The human keypoint model works together with the ST-GCN model in the [skeleton-based action recognition](../../deploy/pipeline/docs/tutorials/pphuman_action.md) solution. The keypoint model is HRNet; for more details about keypoint models, see the [KeyPoint](../keypoint/README.md) page. The trained model can be downloaded below.
| Model | Dataset | AP<sup>val<br>0.5:0.95 | Download | Config |
|:---------|:-------:|:------:| :----: | :------:|
| HRNet | Business dataset | 87.1 |[download](https://bj.bcebos.com/v1/paddledet/models/pipeline/dark_hrnet_w32_256x192.pdparams) | [config](./hrnet_w32_256x192.yml) |
# ST-GCN Skeleton-Based Action Recognition Model
The human keypoint model works together with the [ST-GCN](https://arxiv.org/abs/1801.07455) model in the [skeleton-based action recognition](../../deploy/pipeline/docs/tutorials/pphuman_action.md) solution.
The ST-GCN model is trained with [PaddleVideo](https://github.com/PaddlePaddle/PaddleVideo/blob/develop/applications/PPHuman).
The inference model can be downloaded below.
| Model | Dataset | AP<sup>val<br>0.5:0.95 | Download | Config |
|:---------|:-------:|:------:| :----: | :------:|
| ST-GCN | Business dataset | 87.1 |[download](https://bj.bcebos.com/v1/paddledet/models/pipeline/STGCN.zip) | [config](https://github.com/PaddlePaddle/PaddleVideo/blob/develop/applications/PPHuman/configs/stgcn_pphuman.yaml) |
# PP-TSM Video Classification Model
The [video-classification-based action recognition](../../deploy/pipeline/docs/tutorials/pphuman_action.md) solution is built on the `PP-TSM` model.
The PP-TSM model is trained with [PaddleVideo](https://github.com/PaddlePaddle/PaddleVideo/tree/develop/applications/FightRecognition).
The inference model can be downloaded below.
| Model | Dataset | Acc | Download | Config |
|:---------|:-------:|:------:| :----: | :------:|
| PP-TSM | Combined open-source datasets | 89.06 |[download](https://videotag.bj.bcebos.com/PaddleVideo-release2.3/ppTSM_fight.zip) | [config](https://github.com/PaddlePaddle/PaddleVideo/tree/develop/applications/FightRecognition/pptsm_fight_frames_dense.yaml) |
# PP-HGNet and PP-LCNet Attribute Recognition Models
Pedestrian attribute recognition is implemented with the PP-HGNet and PP-LCNet models; see the [PP-Human attribute recognition module](../../deploy/pipeline/docs/tutorials/pphuman_attribute.md) for details. The models are trained with the [PaddleClas](https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/PP-LCNet.md) suite. The inference models can be downloaded below.
| Model | Dataset | mA | Download | Config |
|:---------|:-------:|:------:| :----: | :------:|
| PP-HGNet_small | Business dataset | 95.4 |[download](https://bj.bcebos.com/v1/paddledet/models/pipeline/PPHGNet_small_person_attribute_954_infer.zip) | - |
| PP-LCNet | Business dataset | 94.5 |[download](https://bj.bcebos.com/v1/paddledet/models/pipeline/PPLCNet_x1_0_person_attribute_945_infer.zip) | [config](https://github.com/PaddlePaddle/PaddleClas/blob/develop/ppcls/configs/PULC/person_attribute/PPLCNet_x1_0.yaml) |
## Citation
```
@article{shao2018crowdhuman,
title={CrowdHuman: A Benchmark for Detecting Human in a Crowd},
author={Shao, Shuai and Zhao, Zijian and Li, Boxun and Xiao, Tete and Yu, Gang and Zhang, Xiangyu and Sun, Jian},
journal={arXiv preprint arXiv:1805.00123},
year={2018}
}
```
| PaddleDetection/configs/pphuman/README.md/0 | {
"file_path": "PaddleDetection/configs/pphuman/README.md",
"repo_id": "PaddleDetection",
"token_count": 4273
} | 33 |
Simplified Chinese | [English](README.md)
## PP-YOLOE Vehicle Detection Models
The PaddleDetection team provides PP-YOLOE based detection models for autonomous-driving scenarios, which users can download and use directly. Five datasets are covered (BDD100K-DET, BDD100K-MOT, UA-DETRAC, PPVehicle9cls and PPVehicle); the first three are public datasets and the last two are merged datasets.
- BDD100K-DET has 10 categories: `pedestrian(1), rider(2), car(3), truck(4), bus(5), train(6), motorcycle(7), bicycle(8), traffic light(9), traffic sign(10)`.
- BDD100K-MOT has 8 categories: `pedestrian(1), rider(2), car(3), truck(4), bus(5), train(6), motorcycle(7), bicycle(8)`, but the dataset is larger than BDD100K-DET.
- UA-DETRAC has 4 categories: `car(1), bus(2), van(3), others(4)`.
- PPVehicle9cls merges BDD100K-MOT and UA-DETRAC and has 9 categories: `pedestrian(1), rider(2), car(3), truck(4), bus(5), van(6), motorcycle(7), bicycle(8), others(9)`.
- PPVehicle also merges BDD100K-MOT and UA-DETRAC, where `car, truck, bus, van` from BDD100K-MOT and `car, bus, van` from UA-DETRAC are all merged into one category `vehicle(1)`.
The deployment models are all used in the [PP-Vehicle](../../deploy/pipeline/) project.
| Model | Dataset | Classes | mAP<sup>val<br>0.5:0.95 | Download | Config |
|:---------|:---------------:|:------:|:-----------------------:|:---------:| :-----: |
|PP-YOLOE-l| BDD100K-DET | 10 | 35.6 | [download](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_36e_bdd100kdet.pdparams) | [config](./ppyoloe_crn_l_36e_bdd100kdet.yml) |
|PP-YOLOE-l| BDD100K-MOT | 8 | 33.7 | [download](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_36e_bdd100kmot.pdparams) | [config](./ppyoloe_crn_l_36e_bdd100kmot.yml) |
|PP-YOLOE-l| UA-DETRAC | 4 | 51.4 | [download](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_36e_uadetrac.pdparams) | [config](./ppyoloe_crn_l_36e_uadetrac.yml) |
|PP-YOLOE-l| PPVehicle9cls | 9 | 40.0 | [download](https://paddledet.bj.bcebos.com/models/mot_ppyoloe_l_36e_ppvehicle9cls.pdparams) | [config](./mot_ppyoloe_l_36e_ppvehicle9cls.yml) |
|PP-YOLOE-s| PPVehicle9cls | 9 | 35.3 | [download](https://paddledet.bj.bcebos.com/models/mot_ppyoloe_s_36e_ppvehicle9cls.pdparams) | [config](./mot_ppyoloe_s_36e_ppvehicle9cls.yml) |
|PP-YOLOE-l| PPVehicle | 1 | 63.9 | [download](https://paddledet.bj.bcebos.com/models/mot_ppyoloe_l_36e_ppvehicle.pdparams) | [config](./mot_ppyoloe_l_36e_ppvehicle.yml) |
|PP-YOLOE-s| PPVehicle | 1 | 61.3 | [download](https://paddledet.bj.bcebos.com/models/mot_ppyoloe_s_36e_ppvehicle.pdparams) | [config](./mot_ppyoloe_s_36e_ppvehicle.yml) |
|PP-YOLOE+_t-aux(320)| PPVehicle | 1 | 53.5 | [download](https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_t_auxhead_320_60e_ppvehicle.pdparams) | [config](./ppyoloe_plus_crn_t_auxhead_320_60e_ppvehicle.yml) |
**Notes:**
- The PP-YOLOE models are trained on 8 GPUs with mixed precision. If the **number of GPUs** or the **batch size** changes, you should adjust the learning rate according to the formula **lr<sub>new</sub> = lr<sub>default</sub> * (batch_size<sub>new</sub> * GPU_number<sub>new</sub>) / (batch_size<sub>default</sub> * GPU_number<sub>default</sub>)**.
- For detailed usage, please refer to [ppyoloe](../ppyoloe#getting-start).
- To get predictions with the corresponding category names, you can modify or add your own label_list.txt file (one category per line) and set anno_path in TestDataset, for example:
```
TestDataset:
!ImageFolder
    anno_path: label_list.txt # if dataset_dir is not used, anno_path is a path relative to the PaddleDetection root directory
    # dataset_dir: dataset/ppvehicle # if dataset_dir is used, dataset_dir/anno_path becomes the new anno_path
```
Each line of label_list.txt records one category, as shown below:
```
vehicle
```
## YOLOv3 Vehicle Detection Model
Please refer to the [Vehicle_YOLOv3 page](./vehicle_yolov3/README_cn.md)
## PP-OCRv3 License Plate Recognition Model
License plate recognition uses Paddle's self-developed ultra-lightweight models PP-OCRv3_det and PP-OCRv3_rec, fine-tuned on the [CCPD dataset](https://github.com/detectRecog/CCPD) (CCPD2019 + CCPD2020 license plate datasets). The models are trained with [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/applications/%E8%BD%BB%E9%87%8F%E7%BA%A7%E8%BD%A6%E7%89%8C%E8%AF%86%E5%88%AB.md), and we provide the inference models for download:
| Model | Dataset | Accuracy | Download | Config |
|:---------|:-------:|:------:| :----: | :------:|
| PP-OCRv3_det | Combined CCPD dataset | hmean:0.979 |[download](https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_det_infer.tar.gz) | [config](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det_cml.yml) |
| PP-OCRv3_rec | Combined CCPD dataset | acc:0.773 |[download](https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_rec_infer.tar.gz) | [config](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml) |
## PP-LCNet Vehicle Attribute Model
Vehicle attribute recognition uses Paddle's self-developed ultra-lightweight model PP-LCNet, trained on the [VeRi dataset](https://www.v7labs.com/open-datasets/veri-dataset). The model is trained with [PaddleClas](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/en/PULC/PULC_vehicle_attribute_en.md), and we provide the inference model for download:
| Model | Dataset | Accuracy | Download | Config |
|:---------|:-------:|:------:| :----: | :------:|
| PP-LCNet_x1_0 | VeRi dataset | 90.81 |[download](https://bj.bcebos.com/v1/paddledet/models/pipeline/vehicle_attribute_model.zip) | [config](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/ppcls/configs/PULC/vehicle_attribute/PPLCNet_x1_0.yaml) |
## Citation
```
@InProceedings{bdd100k,
author = {Yu, Fisher and Chen, Haofeng and Wang, Xin and Xian, Wenqi and Chen,
Yingying and Liu, Fangchen and Madhavan, Vashisht and Darrell, Trevor},
title = {BDD100K: A Diverse Driving Dataset for Heterogeneous Multitask Learning},
booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2020}
}
@article{CVIU_UA-DETRAC,
author = {Longyin Wen and Dawei Du and Zhaowei Cai and Zhen Lei and Ming{-}Ching Chang and
Honggang Qi and Jongwoo Lim and Ming{-}Hsuan Yang and Siwei Lyu},
title = {{UA-DETRAC:} {A} New Benchmark and Protocol for Multi-Object Detection and Tracking},
journal = {Computer Vision and Image Understanding},
year = {2020}
}
```
| PaddleDetection/configs/ppvehicle/README.md/0 | {
"file_path": "PaddleDetection/configs/ppvehicle/README.md",
"repo_id": "PaddleDetection",
"token_count": 3794
} | 34 |
_BASE_: [
'../../datasets/coco_detection.yml',
'../../runtime.yml',
'../../yolov3/_base_/optimizer_270e.yml',
'../../yolov3/_base_/yolov3_darknet53.yml',
'../../yolov3/_base_/yolov3_reader.yml',
]
snapshot_epoch: 5
weights: https://paddledet.bj.bcebos.com/models/vehicle_yolov3_darknet.pdparams
YOLOv3Head:
anchors: [[8, 9], [10, 23], [19, 15],
[23, 33], [40, 25], [54, 50],
[101, 80], [139, 145], [253, 224]]
BBoxPostProcess:
nms:
name: MultiClassNMS
keep_top_k: 100
score_threshold: 0.005
nms_threshold: 0.45
nms_top_k: 400
num_classes: 6
TrainDataset:
!COCODataSet
dataset_dir: dataset/vehicle
anno_path: annotations/instances_train2017.json
image_dir: train2017
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
EvalDataset:
!COCODataSet
dataset_dir: dataset/vehicle
anno_path: annotations/instances_val2017.json
image_dir: val2017
TestDataset:
!ImageFolder
anno_path: configs/ppvehicle/vehicle_yolov3/vehicle.json
| PaddleDetection/configs/ppvehicle/vehicle_yolov3/vehicle_yolov3_darknet.yml/0 | {
"file_path": "PaddleDetection/configs/ppvehicle/vehicle_yolov3/vehicle_yolov3_darknet.yml",
"repo_id": "PaddleDetection",
"token_count": 487
} | 35 |
_BASE_: [
'./_base_/pcb_detection.yml',
'../../runtime.yml',
'../_base_/optimizer_80e.yml',
'../_base_/ppyoloe_crn.yml',
'../_base_/ppyoloe_reader.yml',
]
log_iter: 100
snapshot_epoch: 5
weights: output/ppyoloe_crn_m_80e_pcb/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyoloe_crn_m_300e_coco.pdparams
depth_mult: 0.67
width_mult: 0.75
| PaddleDetection/configs/ppyoloe/application/ppyoloe_crn_m_80e_pcb.yml/0 | {
"file_path": "PaddleDetection/configs/ppyoloe/application/ppyoloe_crn_m_80e_pcb.yml",
"repo_id": "PaddleDetection",
"token_count": 183
} | 36 |
# PP-YOLOE
## Model Zoo
### Model Zoo on the Objects365 Dataset
| Model | Epoch | Machines | GPUs | Images per GPU | Backbone | Input Size | Box AP<sup>0.5 | Params(M) | FLOPs(G) | V100 FP32(FPS) | V100 TensorRT FP16(FPS) | Download | Config |
|:---------------:|:-----:|:-----------:|:-----------:|:-----------:|:---------:|:----------:|:--------------:|:---------:|:---------:|:-------------:|:-----------------------:| :--------:|:--------:|
| PP-YOLOE+_s | 60 | 3 | 8 | 8 | cspresnet-s | 640 | 18.1 | 7.93 | 17.36 | 208.3 | 333.3 | [model](https://bj.bcebos.com/v1/paddledet/models/pretrained/ppyoloe_crn_s_obj365_pretrained.pdparams) | [config](./ppyoloe_plus_crn_s_60e_objects365.yml) |
| PP-YOLOE+_m | 60 | 4 | 8 | 8 | cspresnet-m | 640 | 25.0 | 23.43 | 49.91 | 123.4 | 208.3 | [model](https://bj.bcebos.com/v1/paddledet/models/pretrained/ppyoloe_crn_m_obj365_pretrained.pdparams) | [config](./ppyoloe_plus_crn_m_60e_objects365.yml) |
| PP-YOLOE+_l | 60 | 3 | 8 | 8 | cspresnet-l | 640 | 30.8 | 52.20 | 110.07 | 78.1 | 149.2 | [model](https://bj.bcebos.com/v1/paddledet/models/pretrained/ppyoloe_crn_l_obj365_pretrained.pdparams) | [config](./ppyoloe_plus_crn_l_60e_objects365.yml) |
| PP-YOLOE+_x | 60 | 4 | 8 | 8 | cspresnet-x | 640 | 32.7 | 98.42 | 206.59 | 45.0 | 95.2 | [model](https://bj.bcebos.com/v1/paddledet/models/pretrained/ppyoloe_crn_x_obj365_pretrained.pdparams) | [config](./ppyoloe_plus_crn_x_60e_objects365.yml) |
**Notes:**
- See the [documentation](../../../docs/tutorials/DistributedTraining_cn.md) for multi-machine training details.
- To download the Objects365 dataset, please refer to the [official Objects365 website](http://www.objects365.org/overview.html). The category list organized by the PaddleDetection team, [objects365_detection_label_list.txt](https://bj.bcebos.com/v1/paddledet/data/objects365/objects365_detection_label_list.txt), can be downloaded and placed under `dataset/objects365/`; each line corresponds to one category. The number of categories needs to be read during inference or model export. If there is no annotation json file, you can modify `configs/datasets/objects365_detection.yml` as follows:
```
TestDataset:
!ImageFolder
# anno_path: annotations/zhiyuan_objv2_val.json
anno_path: objects365_detection_label_list.txt
dataset_dir: dataset/objects365/
```
| PaddleDetection/configs/ppyoloe/objects365/README_cn.md/0 | {
"file_path": "PaddleDetection/configs/ppyoloe/objects365/README_cn.md",
"repo_id": "PaddleDetection",
"token_count": 1307
} | 37 |
_BASE_: [
'../datasets/coco_detection.yml',
'../runtime.yml',
'./_base_/optimizer_300e.yml',
'./_base_/ppyoloe_plus_crn_tiny_auxhead.yml',
'./_base_/ppyoloe_plus_reader.yml',
]
log_iter: 100
snapshot_epoch: 10
weights: output/ppyoloe_plus_crn_t_auxhead_relu_300e_coco/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/CSPResNetb_t_pretrained.pdparams
depth_mult: 0.33
width_mult: 0.375
CSPResNet:
act: 'relu'
CustomCSPPAN:
act: 'relu'
PPYOLOEHead:
act: 'relu'
attn_conv: None
| PaddleDetection/configs/ppyoloe/ppyoloe_plus_crn_t_auxhead_relu_300e_coco.yml/0 | {
"file_path": "PaddleDetection/configs/ppyoloe/ppyoloe_plus_crn_t_auxhead_relu_300e_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 253
} | 38 |
English | [简体中文](README.md)
# Rotated Object Detection
## Table of Contents
- [Introduction](#Introduction)
- [Model Zoo](#Model-Zoo)
- [Data Preparation](#Data-Preparation)
- [Installation](#Installation)
## Introduction
Rotated object detection is used to detect rectangular bounding boxes with angle information, that is, the long and short sides of the rectangular bounding box are no longer parallel to the image coordinate axes. Oriented bounding boxes generally contain less background information than horizontal bounding boxes. Rotated object detection is often used in remote sensing scenarios.
## Model Zoo
| Model | mAP | Lr Scheduler | Angle | Aug | GPU Number | images/GPU | download | config |
|:---:|:----:|:---------:|:-----:|:--------:|:-----:|:------------:|:-------:|:------:|
| [S2ANet](./s2anet/README_en.md) | 73.84 | 2x | le135 | - | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/s2anet_alignconv_2x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/s2anet/s2anet_alignconv_2x_dota.yml) |
| [FCOSR](./fcosr/README_en.md) | 76.62 | 3x | oc | RR | 4 | 4 | [model](https://paddledet.bj.bcebos.com/models/fcosr_x50_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/fcosr/fcosr_x50_3x_dota.yml) |
| [PP-YOLOE-R-s](./ppyoloe_r/README_en.md) | 73.82 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_s_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_dota.yml) |
| [PP-YOLOE-R-s](./ppyoloe_r/README_en.md) | 79.42 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_s_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_dota_ms.yml) |
| [PP-YOLOE-R-m](./ppyoloe_r/README_en.md) | 77.64 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_m_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_m_3x_dota.yml) |
| [PP-YOLOE-R-m](./ppyoloe_r/README_en.md) | 79.71 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_m_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_m_3x_dota_ms.yml) |
| [PP-YOLOE-R-l](./ppyoloe_r/README_en.md) | 78.14 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota.yml) |
| [PP-YOLOE-R-l](./ppyoloe_r/README_en.md) | 80.02 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_l_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_l_3x_dota_ms.yml) |
| [PP-YOLOE-R-x](./ppyoloe_r/README_en.md) | 78.28 | 3x | oc | RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_x_3x_dota.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_x_3x_dota.yml) |
| [PP-YOLOE-R-x](./ppyoloe_r/README_en.md) | 80.73 | 3x | oc | MS+RR | 4 | 2 | [model](https://paddledet.bj.bcebos.com/models/ppyoloe_r_crn_x_3x_dota_ms.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r/ppyoloe_r_crn_x_3x_dota_ms.yml) |
**Notes:**
- if **GPU number** or **mini-batch size** is changed, **learning rate** should be adjusted according to the formula **lr<sub>new</sub> = lr<sub>default</sub> * (batch_size<sub>new</sub> * GPU_number<sub>new</sub>) / (batch_size<sub>default</sub> * GPU_number<sub>default</sub>)**.
- Models in the model zoo are trained and tested with a single scale by default. If `MS` is indicated in the data augmentation column, multi-scale training and multi-scale testing are used. If `RR` is indicated, RandomRotate data augmentation is used during training.
## Data Preparation
### DOTA Dataset preparation
The DOTA dataset is a large-scale remote sensing image dataset containing annotations of oriented and horizontal bounding boxes. The dataset can be downloaded from the [official website of the DOTA dataset](https://captain-whu.github.io/DOTA/). After decompression, its directory structure is as follows.
```
${DOTA_ROOT}
├── test
│ └── images
├── train
│ ├── images
│ └── labelTxt
└── val
├── images
└── labelTxt
```
For labeled data, each image corresponds to a txt file with the same name, and each row in the txt file represents one rotated bounding box in the following format:
```
x1 y1 x2 y2 x3 y3 x4 y4 class_name difficult
```
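For reference, one annotation line in this format can be parsed as follows. This is only a minimal sketch (the function name and the sample line are illustrative, not taken from the official tools):
```python
def parse_dota_line(line):
    """Parse a DOTA label line: x1 y1 x2 y2 x3 y3 x4 y4 class_name difficult."""
    vals = line.strip().split()
    poly = [float(v) for v in vals[:8]]   # four corner points of the rotated box
    class_name = vals[8]
    difficult = int(vals[9]) if len(vals) > 9 else 0
    return poly, class_name, difficult

# made-up example line
poly, name, difficult = parse_dota_line(
    '2753.0 2408.0 2861.0 2385.0 2888.0 2468.0 2805.0 2502.0 plane 0')
```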
#### Slicing data with a single scale
The image resolution of DOTA dataset is relatively high, so we usually slice the images before training and testing. To slice the images with a single scale, you can use the command below
``` bash
# slicing labeled data
python configs/rotate/tools/prepare_data.py \
--input_dirs ${DOTA_ROOT}/train/ ${DOTA_ROOT}/val/ \
--output_dir ${OUTPUT_DIR}/trainval1024/ \
--coco_json_file DOTA_trainval1024.json \
--subsize 1024 \
--gap 200 \
--rates 1.0
# slicing unlabeled data by setting --image_only
python configs/rotate/tools/prepare_data.py \
--input_dirs ${DOTA_ROOT}/test/ \
--output_dir ${OUTPUT_DIR}/test1024/ \
--coco_json_file DOTA_test1024.json \
--subsize 1024 \
--gap 200 \
--rates 1.0 \
--image_only
```
#### Slicing data with multiple scales
To slice the images with multiple scales, you can use the command below
``` bash
# slicing labeled data
python configs/rotate/tools/prepare_data.py \
--input_dirs ${DOTA_ROOT}/train/ ${DOTA_ROOT}/val/ \
--output_dir ${OUTPUT_DIR}/trainval/ \
--coco_json_file DOTA_trainval1024.json \
--subsize 1024 \
--gap 500 \
--rates 0.5 1.0 1.5
# slicing unlabeled data by setting --image_only
python configs/rotate/tools/prepare_data.py \
--input_dirs ${DOTA_ROOT}/test/ \
--output_dir ${OUTPUT_DIR}/test1024/ \
--coco_json_file DOTA_test1024.json \
--subsize 1024 \
--gap 500 \
--rates 0.5 1.0 1.5 \
--image_only
```
### Custom Dataset
Rotated object detection uses the standard COCO data format, and you can convert your dataset to COCO format to train the model. The annotations in standard COCO format contain the following information
``` python
'annotations': [
{
'id': 2083, 'category_id': 9, 'image_id': 9008,
        'bbox': [x, y, w, h], # horizontal bounding box
'segmentation': [[x1, y1, x2, y2, x3, y3, x4, y4]], # rotated bounding box
...
}
...
]
```
**It should be noted that `bbox` is the horizontal bounding box and `segmentation` contains the four corner points of the rotated bounding box (clockwise or counterclockwise). The `bbox` can be empty when training a rotated object detector, and it is recommended to generate `bbox` from `segmentation`**. In PaddleDetection 2.4 and earlier versions, `bbox` represented the rotated bounding box [x, y, w, h, angle] and `segmentation` was empty. **This format is no longer supported after PaddleDetection 2.5, so please download the latest dataset or convert it to the standard COCO format**.
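For example, the horizontal `bbox` can be generated from `segmentation` as the axis-aligned envelope of the four corner points. The helper below is only a minimal sketch of this conversion, not the converter shipped with PaddleDetection:
```python
def segmentation_to_bbox(segmentation):
    """Compute a COCO-style [x, y, w, h] bbox from segmentation = [[x1, y1, ..., x4, y4]]."""
    pts = segmentation[0]
    xs, ys = pts[0::2], pts[1::2]
    x_min, y_min, x_max, y_max = min(xs), min(ys), max(xs), max(ys)
    return [x_min, y_min, x_max - x_min, y_max - y_min]

print(segmentation_to_bbox([[10., 0., 50., 20., 40., 40., 0., 20.]]))  # [0.0, 0.0, 50.0, 40.0]
```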
## Installation
Rotated object detection models depend on external custom operators for training, evaluation, etc. In a Linux environment, you can compile and install them with the following command.
```
cd ppdet/ext_op
python setup.py install
```
In a Windows environment, perform the following steps to install it:
(1) Install Visual Studio (Visual Studio 2015 Update3 or later is required);
(2) Go to Start --> Visual Studio 2017 --> X64 Native Tools Command Prompt for VS 2017;
(3) Set the environment variable: `set DISTUTILS_USE_SDK=1`
(4) Enter the `ppdet/ext_op` directory and run `python setup.py install`.
After the installation, you can run the unit tests in `ppdet/ext_op/unittest` to verify whether the external operators are installed correctly.
| PaddleDetection/configs/rotate/README_en.md/0 | {
"file_path": "PaddleDetection/configs/rotate/README_en.md",
"repo_id": "PaddleDetection",
"token_count": 3157
} | 39 |
_BASE_: [
'../../datasets/dota.yml',
'../../runtime.yml',
'_base_/optimizer_3x.yml',
'_base_/ppyoloe_r_reader.yml',
'_base_/ppyoloe_r_crn.yml'
]
log_iter: 50
snapshot_epoch: 1
weights: output/ppyoloe_r_crn_s_3x_dota/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/CSPResNetb_s_pretrained.pdparams
depth_mult: 0.33
width_mult: 0.50
| PaddleDetection/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_dota.yml/0 | {
"file_path": "PaddleDetection/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_dota.yml",
"repo_id": "PaddleDetection",
"token_count": 183
} | 40 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import six
import glob
import copy
import yaml
import argparse
import cv2
import numpy as np
from shapely.geometry import Polygon
from onnxruntime import InferenceSession
# preprocess ops
def decode_image(img_path):
with open(img_path, 'rb') as f:
im_read = f.read()
data = np.frombuffer(im_read, dtype='uint8')
im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
img_info = {
"im_shape": np.array(
im.shape[:2], dtype=np.float32),
"scale_factor": np.array(
[1., 1.], dtype=np.float32)
}
return im, img_info
class Resize(object):
def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
self.keep_ratio = keep_ratio
self.interp = interp
def __call__(self, im, im_info):
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
im_channel = im.shape[2]
im_scale_y, im_scale_x = self.generate_scale(im)
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
im_info['scale_factor'] = np.array(
[im_scale_y, im_scale_x]).astype('float32')
return im, im_info
def generate_scale(self, im):
origin_shape = im.shape[:2]
im_c = im.shape[2]
if self.keep_ratio:
im_size_min = np.min(origin_shape)
im_size_max = np.max(origin_shape)
target_size_min = np.min(self.target_size)
target_size_max = np.max(self.target_size)
im_scale = float(target_size_min) / float(im_size_min)
if np.round(im_scale * im_size_max) > target_size_max:
im_scale = float(target_size_max) / float(im_size_max)
im_scale_x = im_scale
im_scale_y = im_scale
else:
resize_h, resize_w = self.target_size
im_scale_y = resize_h / float(origin_shape[0])
im_scale_x = resize_w / float(origin_shape[1])
return im_scale_y, im_scale_x
class Permute(object):
def __init__(self, ):
super(Permute, self).__init__()
def __call__(self, im, im_info):
im = im.transpose((2, 0, 1))
return im, im_info
class NormalizeImage(object):
def __init__(self, mean, std, is_scale=True, norm_type='mean_std'):
self.mean = mean
self.std = std
self.is_scale = is_scale
self.norm_type = norm_type
def __call__(self, im, im_info):
im = im.astype(np.float32, copy=False)
if self.is_scale:
scale = 1.0 / 255.0
im *= scale
if self.norm_type == 'mean_std':
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
im -= mean
im /= std
return im, im_info
class PadStride(object):
def __init__(self, stride=0):
self.coarsest_stride = stride
def __call__(self, im, im_info):
coarsest_stride = self.coarsest_stride
if coarsest_stride <= 0:
return im, im_info
im_c, im_h, im_w = im.shape
pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
padding_im[:, :im_h, :im_w] = im
return padding_im, im_info
class Compose:
def __init__(self, transforms):
self.transforms = []
for op_info in transforms:
new_op_info = op_info.copy()
op_type = new_op_info.pop('type')
self.transforms.append(eval(op_type)(**new_op_info))
def __call__(self, img_path):
img, im_info = decode_image(img_path)
for t in self.transforms:
img, im_info = t(img, im_info)
inputs = copy.deepcopy(im_info)
inputs['image'] = img
return inputs
# postprocess
def rbox_iou(g, p):
g = np.array(g)
p = np.array(p)
g = Polygon(g[:8].reshape((4, 2)))
p = Polygon(p[:8].reshape((4, 2)))
g = g.buffer(0)
p = p.buffer(0)
if not g.is_valid or not p.is_valid:
return 0
inter = Polygon(g).intersection(Polygon(p)).area
union = g.area + p.area - inter
if union == 0:
return 0
else:
return inter / union
def multiclass_nms_rotated(pred_bboxes,
pred_scores,
                           iou_threshold=0.1,
score_threshold=0.1):
"""
Args:
pred_bboxes (numpy.ndarray): [B, N, 8]
pred_scores (numpy.ndarray): [B, C, N]
Return:
bboxes (numpy.ndarray): [N, 10]
bbox_num (numpy.ndarray): [B]
"""
bbox_num = []
bboxes = []
for bbox_per_img, score_per_img in zip(pred_bboxes, pred_scores):
num_per_img = 0
for cls_id, score_per_cls in enumerate(score_per_img):
keep_mask = score_per_cls > score_threshold
bbox = bbox_per_img[keep_mask]
score = score_per_cls[keep_mask]
idx = score.argsort()[::-1]
bbox = bbox[idx]
score = score[idx]
keep_idx = []
            for i, b in enumerate(bbox):
                suppressed = False
                for gi in keep_idx:
                    g = bbox[gi]
                    if rbox_iou(b, g) > iou_threshold:
                        suppressed = True
                        break
                if suppressed:
                    continue
                keep_idx.append(i)
keep_box = bbox[keep_idx]
keep_score = score[keep_idx]
keep_cls_ids = np.ones(len(keep_idx)) * cls_id
bboxes.append(
np.concatenate(
[keep_cls_ids[:, None], keep_score[:, None], keep_box],
axis=-1))
num_per_img += len(keep_idx)
bbox_num.append(num_per_img)
return np.concatenate(bboxes, axis=0), np.array(bbox_num)
def get_test_images(infer_dir, infer_img):
"""
Get image path list in TEST mode
"""
assert infer_img is not None or infer_dir is not None, \
"--image_file or --image_dir should be set"
assert infer_img is None or os.path.isfile(infer_img), \
"{} is not a file".format(infer_img)
assert infer_dir is None or os.path.isdir(infer_dir), \
"{} is not a directory".format(infer_dir)
# infer_img has a higher priority
if infer_img and os.path.isfile(infer_img):
return [infer_img]
images = set()
infer_dir = os.path.abspath(infer_dir)
assert os.path.isdir(infer_dir), \
"infer_dir {} is not a directory".format(infer_dir)
exts = ['jpg', 'jpeg', 'png', 'bmp']
exts += [ext.upper() for ext in exts]
for ext in exts:
images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
images = list(images)
assert len(images) > 0, "no image found in {}".format(infer_dir)
print("Found {} inference images in total.".format(len(images)))
return images
def predict_image(infer_config, predictor, img_list):
# load preprocess transforms
transforms = Compose(infer_config['Preprocess'])
# predict image
for img_path in img_list:
inputs = transforms(img_path)
inputs_name = [var.name for var in predictor.get_inputs()]
inputs = {k: inputs[k][None, ] for k in inputs_name}
outputs = predictor.run(output_names=None, input_feed=inputs)
bboxes, bbox_num = multiclass_nms_rotated(
np.array(outputs[0]), np.array(outputs[1]))
print("ONNXRuntime predict: ")
for bbox in bboxes:
if bbox[0] > -1 and bbox[1] > infer_config['draw_threshold']:
print(f"{int(bbox[0])} {bbox[1]} "
f"{bbox[2]} {bbox[3]} {bbox[4]} {bbox[5]}"
f"{bbox[6]} {bbox[7]} {bbox[8]} {bbox[9]}")
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--infer_cfg", type=str, help="infer_cfg.yml")
parser.add_argument(
'--onnx_file',
type=str,
default="model.onnx",
help="onnx model file path")
parser.add_argument("--image_dir", type=str)
parser.add_argument("--image_file", type=str)
return parser.parse_args()
if __name__ == '__main__':
FLAGS = parse_args()
# load image list
img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
# load predictor
predictor = InferenceSession(FLAGS.onnx_file)
# load infer config
with open(FLAGS.infer_cfg) as f:
infer_config = yaml.safe_load(f)
predict_image(infer_config, predictor, img_list)
| PaddleDetection/configs/rotate/tools/onnx_infer.py/0 | {
"file_path": "PaddleDetection/configs/rotate/tools/onnx_infer.py",
"repo_id": "PaddleDetection",
"token_count": 4776
} | 41 |
use_gpu: true
use_xpu: false
use_mlu: false
use_npu: false
log_iter: 20
save_dir: output
snapshot_epoch: 1
print_flops: false
print_params: false
# Exporting the model
export:
  post_process: True  # Whether post-processing is included in the network when exporting the model.
  nms: True  # Whether NMS is included in the network when exporting the model.
  benchmark: False  # Used to benchmark model performance; if set `True`, post-processing and NMS will not be exported.
fuse_conv_bn: False
| PaddleDetection/configs/runtime.yml/0 | {
"file_path": "PaddleDetection/configs/runtime.yml",
"repo_id": "PaddleDetection",
"token_count": 163
} | 42 |
# Distillation
## Contents
- [YOLOv3 Model Distillation](#yolov3-model-distillation)
- [FGD Model Distillation](#fgd-model-distillation)
- [CWD Model Distillation](#cwd-model-distillation)
- [LD Model Distillation](#ld-model-distillation)
- [PPYOLOE Model Distillation](#ppyoloe-model-distillation)
- [Citations](#citations)
## YOLOv3 Model Distillation
Taking YOLOv3-MobileNetV1 as an example, YOLOv3-ResNet34 is used as the teacher network for distillation training, and the YOLOv3-MobileNetV1 student network is distilled from it.
As a training target for object detection, the COCO dataset is relatively hard, which means the teacher network predicts a large number of background bboxes. Directly using the teacher's predictions as the `soft label` for the student therefore causes a severe class-imbalance problem. Solving this requires an additional technique; for the detailed background, please refer to the paper [Object detection at 200 Frames Per Second](https://arxiv.org/abs/1805.06361).
To decide what to distill, we first locate the `x, y, w, h, cls, objectness` tensors produced by the student and teacher networks, and use the teacher's outputs to guide the student's training. See the [code](../../../ppdet/slim/distill_loss.py) for the concrete implementation; a simplified sketch of the objectness-scaled loss is shown after the results table below.
| Model | Scheme | Input Size | Epochs | Box mAP | Config | Download |
| :---------------: | :---------: | :----: | :----: |:-----------: | :--------------: | :------------: |
| YOLOv3-ResNet34 | teacher | 608 | 270e | 36.2 | [config](../../yolov3/yolov3_r34_270e_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/yolov3_r34_270e_coco.pdparams) |
| YOLOv3-MobileNetV1 | student | 608 | 270e | 29.4 | [config](../../yolov3/yolov3_mobilenet_v1_270e_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/yolov3_mobilenet_v1_270e_coco.pdparams) |
| YOLOv3-MobileNetV1 | distill | 608 | 270e | 31.0(+1.6) | [config](../../yolov3/yolov3_mobilenet_v1_270e_coco.yml),[slim_config](./yolov3_mobilenet_v1_coco_distill.yml) | [download](https://paddledet.bj.bcebos.com/models/slim/yolov3_mobilenet_v1_coco_distill.pdparams) |
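The core idea from the paper above is to weight the distillation terms by the teacher's objectness, so that the many background predictions contribute almost nothing to the loss. The following is a simplified NumPy sketch of that weighting, not the actual implementation in `ppdet/slim/distill_loss.py`; the tensor names and shapes are assumptions made for this illustration.
```python
import numpy as np

def objectness_scaled_distill_loss(student, teacher):
    """Toy illustration of objectness-scaled distillation.
    `student` and `teacher` are dicts of flattened per-anchor predictions
    (names and shapes are assumptions for this sketch):
      'obj'  : objectness logits, shape [N]
      'xywh' : box regression outputs, shape [N, 4]
      'cls'  : per-class logits, shape [N, num_classes]
    """
    sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
    t_obj = sigmoid(teacher['obj'])  # teacher confidence in [0, 1]
    # Objectness is distilled everywhere.
    l_obj = np.mean((sigmoid(student['obj']) - t_obj) ** 2)
    # Box and class terms are scaled by the teacher objectness, so background
    # anchors (t_obj close to 0) barely contribute to the loss.
    l_box = np.mean(t_obj[:, None] * (student['xywh'] - teacher['xywh']) ** 2)
    l_cls = np.mean(t_obj[:, None] *
                    (sigmoid(student['cls']) - sigmoid(teacher['cls'])) ** 2)
    return l_obj + l_box + l_cls
```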
<details>
<summary> Quick Start </summary>
```shell
# single-GPU training (not recommended)
python tools/train.py -c configs/yolov3/yolov3_mobilenet_v1_270e_coco.yml --slim_config configs/slim/distill/yolov3_mobilenet_v1_coco_distill.yml
# multi-GPU training
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/yolov3/yolov3_mobilenet_v1_270e_coco.yml --slim_config configs/slim/distill/yolov3_mobilenet_v1_coco_distill.yml
# evaluation
python tools/eval.py -c configs/yolov3/yolov3_mobilenet_v1_270e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/slim/yolov3_mobilenet_v1_coco_distill.pdparams
# inference
python tools/infer.py -c configs/yolov3/yolov3_mobilenet_v1_270e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/slim/yolov3_mobilenet_v1_coco_distill.pdparams --infer_img=demo/000000014439_640x640.jpg
```
- `-c`: specify the model config file, which is also the student config file.
- `--slim_config`: specify the compression strategy config file, which is also the teacher config file.
</details>
## FGD Model Distillation
FGD, short for [Focal and Global Knowledge Distillation for Detectors](https://arxiv.org/abs/2111.11837v1), is a distillation method for object detection that consists of two parts, `Focal` and `Global`. `Focal` distillation separates the foreground and background of the image, so that the student focuses on the key pixels of the teacher's foreground and background features separately; `Global` distillation reconstructs the relations between different pixels and transfers them from the teacher to the student, compensating for the global information lost in `Focal` distillation. Experiments show that FGD effectively improves accuracy for both anchor-based and anchor-free detectors. A rough sketch of the `Focal` term is given after the results table.
In PaddleDetection, we implemented the FGD algorithm and validated it on RetinaNet. The experimental results are as follows:
| Model | Scheme | Input Size | Epochs | Box mAP | Config | Download |
| ----------------- | ----------- | ------ | :----: | :-----------: | :--------------: | :------------: |
| RetinaNet-ResNet101| teacher | 1333x800 | 2x | 40.6 | [config](../../retinanet/retinanet_r101_fpn_2x_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/retinanet_r101_fpn_2x_coco.pdparams) |
| RetinaNet-ResNet50 | student | 1333x800 | 2x | 39.1 | [config](../../retinanet/retinanet_r50_fpn_2x_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/retinanet_r50_fpn_2x_coco.pdparams) |
| RetinaNet-ResNet50 | FGD | 1333x800 | 2x | 40.8(+1.7) | [config](../../retinanet/retinanet_r50_fpn_2x_coco.yml),[slim_config](./retinanet_resnet101_coco_distill.yml) | [download](https://paddledet.bj.bcebos.com/models/retinanet_r101_distill_r50_2x_coco.pdparams) |
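For intuition, the `Focal` part can be sketched as a foreground/background-masked feature loss, where the mask comes from the ground-truth boxes projected onto the feature map. This is only a hedged NumPy illustration; the real FGD loss additionally uses spatial and channel attention weights and the `Global` relation term, and the loss weights below are assumptions.
```python
import numpy as np

def focal_feature_loss(feat_s, feat_t, gt_boxes, alpha_fg=1.0, alpha_bg=0.5):
    """feat_s, feat_t: [C, H, W] student/teacher features (assumed aligned).
    gt_boxes: [M, 4] boxes in feature-map coordinates (x1, y1, x2, y2)."""
    C, H, W = feat_t.shape
    fg_mask = np.zeros((H, W), dtype=np.float32)
    for x1, y1, x2, y2 in gt_boxes.astype(int):
        fg_mask[max(y1, 0):min(y2, H), max(x1, 0):min(x2, W)] = 1.0
    bg_mask = 1.0 - fg_mask
    diff = (feat_s - feat_t) ** 2  # [C, H, W]
    l_fg = (diff * fg_mask).sum() / max(fg_mask.sum() * C, 1.0)
    l_bg = (diff * bg_mask).sum() / max(bg_mask.sum() * C, 1.0)
    return alpha_fg * l_fg + alpha_bg * l_bg
```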
<details>
<summary> Quick Start </summary>
```shell
# single-GPU training (not recommended)
python tools/train.py -c configs/retinanet/retinanet_r50_fpn_2x_coco.yml --slim_config configs/slim/distill/retinanet_resnet101_coco_distill.yml
# multi-GPU training
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/retinanet/retinanet_r50_fpn_2x_coco.yml --slim_config configs/slim/distill/retinanet_resnet101_coco_distill.yml
# evaluation
python tools/eval.py -c configs/retinanet/retinanet_r50_fpn_2x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/retinanet_r101_distill_r50_2x_coco.pdparams
# inference
python tools/infer.py -c configs/retinanet/retinanet_r50_fpn_2x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/retinanet_r101_distill_r50_2x_coco.pdparams --infer_img=demo/000000014439_640x640.jpg
```
- `-c`: specify the model config file, which is also the student config file.
- `--slim_config`: specify the compression strategy config file, which is also the teacher config file.
</details>
## CWD Model Distillation
CWD, short for [Channel-wise Knowledge Distillation for Dense Prediction](https://arxiv.org/pdf/2011.13256.pdf), minimizes the Kullback-Leibler (KL) divergence between the channel-wise probability maps of the teacher and student networks, so that the distillation focuses on the most salient regions of each channel, which improves accuracy on dense prediction tasks such as text detection and image segmentation. A sketch of the channel-wise KL term is given after the results table. In PaddleDetection, we implemented the CWD algorithm and validated it on GFL and RetinaNet. The experimental results are as follows:
| Model | Scheme | Input Size | Epochs | Box mAP | Config | Download |
| ----------------- | ----------- | ------ | :----: | :-----------: | :--------------: | :------------: |
| RetinaNet-ResNet101| teacher | 1333x800 | 2x | 40.6 | [config](../../retinanet/retinanet_r101_fpn_2x_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/retinanet_r101_fpn_2x_coco.pdparams) |
| RetinaNet-ResNet50 | student | 1333x800 | 2x | 39.1 | [config](../../retinanet/retinanet_r50_fpn_2x_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/retinanet_r50_fpn_2x_coco.pdparams) |
| RetinaNet-ResNet50 | CWD | 1333x800 | 2x | 40.5(+1.4) | [config](../../retinanet/retinanet_r50_fpn_2x_coco.yml),[slim_config](./retinanet_resnet101_coco_distill_cwd.yml) | [download](https://paddledet.bj.bcebos.com/models/retinanet_r50_fpn_2x_coco_cwd.pdparams) |
| GFL_ResNet101-vd| teacher | 1333x800 | 2x | 46.8 | [config](../../gfl/gfl_r101vd_fpn_mstrain_2x_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/gfl_r101vd_fpn_mstrain_2x_coco.pdparams) |
| GFL_ResNet50 | student | 1333x800 | 1x | 41.0 | [config](../../gfl/gfl_r50_fpn_1x_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/gfl_r50_fpn_1x_coco.pdparams) |
| GFL_ResNet50 | CWD | 1333x800 | 2x | 44.0(+3.0) | [config](../../gfl/gfl_r50_fpn_1x_coco.yml),[slim_config](./gfl_r101vd_fpn_coco_distill_cwd.yml) | [download](https://bj.bcebos.com/v1/paddledet/models/gfl_r50_fpn_2x_coco_cwd.pdparams) |
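For reference, the channel-wise KL term can be sketched as follows: each channel of the teacher and student maps is flattened over the spatial dimensions and normalized with a temperature-scaled softmax, and the KL divergence between the two distributions is averaged over channels. This is a NumPy illustration of the formula, not the ppdet implementation; the temperature value is an assumption.
```python
import numpy as np

def channel_wise_kd(feat_s, feat_t, tau=4.0, eps=1e-8):
    """feat_s, feat_t: [C, H, W] features from student and teacher."""
    C, H, W = feat_t.shape
    s = feat_s.reshape(C, H * W) / tau
    t = feat_t.reshape(C, H * W) / tau

    def softmax(x):
        x = x - x.max(axis=1, keepdims=True)  # numerical stability
        e = np.exp(x)
        return e / e.sum(axis=1, keepdims=True)

    p_t = softmax(t)
    p_s = softmax(s)
    # KL(p_t || p_s) per channel, averaged; tau**2 keeps the gradient scale.
    kl = (p_t * (np.log(p_t + eps) - np.log(p_s + eps))).sum(axis=1)
    return tau ** 2 * kl.mean()
```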
<details>
<summary> Quick Start </summary>
```shell
# single-GPU training (not recommended)
python tools/train.py -c configs/retinanet/retinanet_r50_fpn_2x_coco.yml --slim_config configs/slim/distill/retinanet_resnet101_coco_distill_cwd.yml
# multi-GPU training
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/retinanet/retinanet_r50_fpn_2x_coco.yml --slim_config configs/slim/distill/retinanet_resnet101_coco_distill_cwd.yml
# evaluation
python tools/eval.py -c configs/retinanet/retinanet_r50_fpn_2x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/retinanet_r50_fpn_2x_coco_cwd.pdparams
# inference
python tools/infer.py -c configs/retinanet/retinanet_r50_fpn_2x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/retinanet_r50_fpn_2x_coco_cwd.pdparams --infer_img=demo/000000014439_640x640.jpg
# single-GPU training (not recommended)
python tools/train.py -c configs/gfl/gfl_r50_fpn_1x_coco.yml --slim_config configs/slim/distill/gfl_r101vd_fpn_coco_distill_cwd.yml
# multi-GPU training
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/gfl/gfl_r50_fpn_1x_coco.yml --slim_config configs/slim/distill/gfl_r101vd_fpn_coco_distill_cwd.yml
# evaluation
python tools/eval.py -c configs/gfl/gfl_r50_fpn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/gfl_r50_fpn_2x_coco_cwd.pdparams
# inference
python tools/infer.py -c configs/gfl/gfl_r50_fpn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/gfl_r50_fpn_2x_coco_cwd.pdparams --infer_img=demo/000000014439_640x640.jpg
```
- `-c`: specify the model config file, which is also the student config file.
- `--slim_config`: specify the compression strategy config file, which is also the teacher config file.
</details>
## LD Model Distillation
LD, short for [Localization Distillation for Dense Object Detection](https://arxiv.org/abs/2102.12252), represents the regression boxes as probability distributions and applies classification-style knowledge distillation to the localization task. Using a region-adaptive, divide-and-conquer strategy, classification knowledge and localization knowledge are learned in different regions respectively. A sketch of the core localization term is given after the results table. In PaddleDetection, we implemented the LD algorithm and validated it on GFL. The experimental results are as follows:
| Model | Scheme | Input Size | Epochs | Box mAP | Config | Download |
| ----------------- | ----------- | ------ | :----: | :-----------: | :--------------: | :------------: |
| GFL_ResNet101-vd| teacher | 1333x800 | 2x | 46.8 | [config](../../gfl/gfl_r101vd_fpn_mstrain_2x_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/gfl_r101vd_fpn_mstrain_2x_coco.pdparams) |
| GFL_ResNet18-vd | student | 1333x800 | 1x | 36.6 | [config](../../gfl/gfl_r18vd_1x_coco.yml) | [download](https://paddledet.bj.bcebos.com/models/gfl_r18vd_1x_coco.pdparams) |
| GFL_ResNet18-vd | LD | 1333x800 | 1x | 38.2(+1.6) | [config](../../gfl/gfl_slim_ld_r18vd_1x_coco.yml),[slim_config](./gfl_ld_distill.yml) | [download](https://bj.bcebos.com/v1/paddledet/models/gfl_slim_ld_r18vd_1x_coco.pdparams) |
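Conceptually, GFL already predicts each box edge as a discrete distribution over a fixed number of bins, and LD applies a temperature-softened KL divergence between the teacher's and the student's edge distributions (usually only inside a valuable localization region). The following is a hedged NumPy sketch of that core term; the shapes and the temperature are assumptions for illustration.
```python
import numpy as np

def localization_distill(edge_logits_s, edge_logits_t, tau=10.0, eps=1e-8):
    """edge_logits_*: [N, 4, bins] logits for the 4 edges of N sampled boxes."""
    def softmax(x):
        x = x - x.max(axis=-1, keepdims=True)
        e = np.exp(x)
        return e / e.sum(axis=-1, keepdims=True)

    p_t = softmax(edge_logits_t / tau)
    p_s = softmax(edge_logits_s / tau)
    kl = (p_t * (np.log(p_t + eps) - np.log(p_s + eps))).sum(axis=-1)  # [N, 4]
    return tau ** 2 * kl.mean()
```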
<details>
<summary> Quick Start </summary>
```shell
# single-GPU training (not recommended)
python tools/train.py -c configs/gfl/gfl_slim_ld_r18vd_1x_coco.yml --slim_config configs/slim/distill/gfl_ld_distill.yml
# multi-GPU training
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/gfl/gfl_slim_ld_r18vd_1x_coco.yml --slim_config configs/slim/distill/gfl_ld_distill.yml
# evaluation
python tools/eval.py -c configs/gfl/gfl_slim_ld_r18vd_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/gfl_slim_ld_r18vd_1x_coco.pdparams
# inference
python tools/infer.py -c configs/gfl/gfl_slim_ld_r18vd_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/gfl_slim_ld_r18vd_1x_coco.pdparams --infer_img=demo/000000014439_640x640.jpg
```
- `-c`: specify the model config file, which is also the student config file.
- `--slim_config`: specify the compression strategy config file, which is also the teacher config file.
</details>
## PPYOLOE Model Distillation
PaddleDetection provides a distillation scheme for PP-YOLOE+ that combines logits distillation and feature distillation; a minimal sketch of how the two terms are combined is given after the results table.
| Model | Scheme | Input Size | Epochs | Box mAP | Config | Download |
| ----------------- | ----------- | ------ | :----: | :-----------: | :--------------: | :------------: |
| PP-YOLOE+_x | teacher | 640 | 80e | 54.7 | [config](../../ppyoloe/ppyoloe_plus_crn_x_80e_coco.yml) | [model](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_plus_crn_x_80e_coco.pdparams) |
| PP-YOLOE+_l | student | 640 | 80e | 52.9 | [config](../../ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml) | [model](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_plus_crn_l_80e_coco.pdparams) |
| PP-YOLOE+_l | distill | 640 | 80e | **54.0(+1.1)** | [config](../../ppyoloe/distill/ppyoloe_plus_crn_l_80e_coco_distill.yml),[slim_config](./ppyoloe_plus_distill_x_distill_l.yml) | [model](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_plus_crn_l_80e_coco_distill.pdparams) |
| PP-YOLOE+_l | teacher | 640 | 80e | 52.9 | [config](../../ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml) | [model](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_plus_crn_l_80e_coco.pdparams) |
| PP-YOLOE+_m | student | 640 | 80e | 49.8 | [config](../../ppyoloe/ppyoloe_plus_crn_m_80e_coco.yml) | [model](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_plus_crn_m_80e_coco.pdparams) |
| PP-YOLOE+_m | distill | 640 | 80e | **51.0(+1.2)** | [config](../../ppyoloe/distill/ppyoloe_plus_crn_m_80e_coco_distill.yml),[slim_config](./ppyoloe_plus_distill_l_distill_m.yml) | [model](https://bj.bcebos.com/v1/paddledet/models/ppyoloe_plus_crn_m_80e_coco_distill.pdparams) |
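As a minimal sketch of how the two kinds of terms are typically combined (a temperature-softened KL on the classification outputs plus an L2 feature-mimicking term), consider the illustration below. The weights, shapes and exact per-term losses are assumptions; the real setup is defined by the distill configs listed above.
```python
import numpy as np

def ppyoloe_plus_distill(cls_logit_s, cls_logit_t, feat_s, feat_t,
                         tau=1.0, logits_w=1.0, feat_w=1.0, eps=1e-8):
    """Toy combination of logits KD and feature KD (shapes/weights assumed)."""
    def softmax(x):
        x = x - x.max(axis=-1, keepdims=True)
        e = np.exp(x)
        return e / e.sum(axis=-1, keepdims=True)

    p_t = softmax(cls_logit_t / tau)  # [N, num_classes]
    p_s = softmax(cls_logit_s / tau)
    logits_kd = (p_t * (np.log(p_t + eps) - np.log(p_s + eps))).sum(-1).mean()
    feat_kd = np.mean((feat_s - feat_t) ** 2)  # feature mimicking
    return logits_w * tau ** 2 * logits_kd + feat_w * feat_kd
```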
<details>
<summary> Quick Start </summary>
```shell
# single-GPU training (not recommended)
python tools/train.py -c configs/ppyoloe/distill/ppyoloe_plus_crn_l_80e_coco_distill.yml --slim_config configs/slim/distill/ppyoloe_plus_distill_x_distill_l.yml
# multi-GPU training
python -m paddle.distributed.launch --log_dir=logs/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyoloe/distill/ppyoloe_plus_crn_l_80e_coco_distill.yml --slim_config configs/slim/distill/ppyoloe_plus_distill_x_distill_l.yml
# evaluation
python tools/eval.py -c configs/ppyoloe/distill/ppyoloe_plus_crn_l_80e_coco_distill.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_l_80e_coco_distill.pdparams
# inference
python tools/infer.py -c configs/ppyoloe/distill/ppyoloe_plus_crn_l_80e_coco_distill.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_l_80e_coco_distill.pdparams --infer_img=demo/000000014439_640x640.jpg
```
- `-c`: specify the model config file, which is also the student config file.
- `--slim_config`: specify the compression strategy config file, which is also the teacher config file.
</details>
## Citations
```
@article{mehta2018object,
title={Object detection at 200 Frames Per Second},
author={Rakesh Mehta and Cemalettin Ozturk},
year={2018},
eprint={1805.06361},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@inproceedings{yang2022focal,
title={Focal and global knowledge distillation for detectors},
author={Yang, Zhendong and Li, Zhe and Jiang, Xiaohu and Gong, Yuan and Yuan, Zehuan and Zhao, Danpei and Yuan, Chun},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages={4643--4652},
year={2022}
}
@Inproceedings{zheng2022LD,
title={Localization Distillation for Dense Object Detection},
author= {Zheng, Zhaohui and Ye, Rongguang and Wang, Ping and Ren, Dongwei and Zuo, Wangmeng and Hou, Qibin and Cheng, Mingming},
booktitle={CVPR},
year={2022}
}
@inproceedings{shu2021channel,
title={Channel-wise knowledge distillation for dense prediction},
author={Shu, Changyong and Liu, Yifan and Gao, Jianfei and Yan, Zheng and Shen, Chunhua},
booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
pages={5311--5320},
year={2021}
}
```
| PaddleDetection/configs/slim/distill/README.md/0 | {
"file_path": "PaddleDetection/configs/slim/distill/README.md",
"repo_id": "PaddleDetection",
"token_count": 8567
} | 43 |
weights: https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams
slim: PTQ
PTQ:
ptq_config: {
'activation_quantizer': 'HistQuantizer',
'upsample_bins': 127,
'hist_percent': 0.999}
quant_batch_num: 10
fuse: True
| PaddleDetection/configs/slim/post_quant/ppyolo_r50vd_dcn_ptq.yml/0 | {
"file_path": "PaddleDetection/configs/slim/post_quant/ppyolo_r50vd_dcn_ptq.yml",
"repo_id": "PaddleDetection",
"token_count": 118
} | 44 |
pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams
slim: QAT
QAT:
quant_config: {
'activation_preprocess_type': 'PACT',
'weight_quantize_type': 'channel_wise_abs_max', 'activation_quantize_type': 'moving_average_abs_max',
'weight_bits': 8, 'activation_bits': 8, 'dtype': 'int8', 'window_size': 10000, 'moving_rate': 0.9,
'quantizable_layer_type': ['Conv2D', 'Linear']}
print_model: True
epoch: 50
LearningRate:
base_lr: 0.0005
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones:
- 30
- 45
- !LinearWarmup
start_factor: 0.
steps: 1000
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0005
type: L2
PPYOLOFPN:
coord_conv: true
block_size: 3
keep_prob: 0.9
spp: true
drop_block: false
| PaddleDetection/configs/slim/quant/ppyolo_r50vd_qat_pact.yml/0 | {
"file_path": "PaddleDetection/configs/slim/quant/ppyolo_r50vd_qat_pact.yml",
"repo_id": "PaddleDetection",
"token_count": 372
} | 45 |
_BASE_: [
'../datasets/coco_instance.yml',
'../runtime.yml',
'_base_/solov2_r50_fpn.yml',
'_base_/optimizer_1x.yml',
'_base_/solov2_reader.yml',
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet101_vd_pretrained.pdparams
weights: output/solov2_r101_vd_fpn_3x_coco/model_final
epoch: 36
use_ema: true
ema_decay: 0.9998
ResNet:
depth: 101
variant: d
freeze_at: 0
return_idx: [0,1,2,3]
dcn_v2_stages: [1,2,3]
num_stages: 4
SOLOv2Head:
seg_feat_channels: 512
stacked_convs: 4
num_grids: [40, 36, 24, 16, 12]
kernel_out_channels: 256
solov2_loss: SOLOv2Loss
mask_nms: MaskMatrixNMS
dcn_v2_stages: [0, 1, 2, 3]
SOLOv2MaskHead:
mid_channels: 128
out_channels: 256
start_level: 0
end_level: 3
use_dcn_in_tower: True
LearningRate:
base_lr: 0.01
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [24, 33]
- !LinearWarmup
start_factor: 0.
steps: 2000
TrainReader:
sample_transforms:
- Decode: {}
- Poly2Mask: {}
- RandomResize: {interp: 1,
target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]],
keep_ratio: True}
- RandomFlip: {}
- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}
- Permute: {}
batch_transforms:
- PadBatch: {pad_to_stride: 32}
- Gt2Solov2Target: {num_grids: [40, 36, 24, 16, 12],
scale_ranges: [[1, 96], [48, 192], [96, 384], [192, 768], [384, 2048]],
coord_sigma: 0.2}
batch_size: 2
shuffle: true
drop_last: true
| PaddleDetection/configs/solov2/solov2_r101_vd_fpn_3x_coco.yml/0 | {
"file_path": "PaddleDetection/configs/solov2/solov2_r101_vd_fpn_3x_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 811
} | 46 |
epoch: 120
LearningRate:
base_lr: 0.001
schedulers:
- !PiecewiseDecay
milestones: [40, 60, 80, 100]
gamma: [0.5, 0.5, 0.4, 0.1]
use_warmup: false
OptimizerBuilder:
optimizer:
momentum: 0.0
type: RMSProp
regularizer:
factor: 0.00005
type: L2
| PaddleDetection/configs/ssd/_base_/optimizer_120e.yml/0 | {
"file_path": "PaddleDetection/configs/ssd/_base_/optimizer_120e.yml",
"repo_id": "PaddleDetection",
"token_count": 135
} | 47 |
_BASE_: [
'../datasets/voc.yml',
'../runtime.yml',
'_base_/optimizer_120e.yml',
'_base_/ssd_mobilenet_v1_300.yml',
'_base_/ssd_mobilenet_reader.yml',
]
weights: output/ssd_mobilenet_v1_300_120e_voc/model_final
# set collate_batch to false because ground-truth info is needed
# on voc dataset and should not collate data in batch when batch size
# is larger than 1.
EvalReader:
collate_batch: false
| PaddleDetection/configs/ssd/ssd_mobilenet_v1_300_120e_voc.yml/0 | {
"file_path": "PaddleDetection/configs/ssd/ssd_mobilenet_v1_300_120e_voc.yml",
"repo_id": "PaddleDetection",
"token_count": 169
} | 48 |
# 1. TTFNet
## Introduction
TTFNet is a real-time object detection network designed to be friendly to training time. It addresses the slow convergence of CenterNet by proposing a new way to generate training samples with a Gaussian kernel, which effectively removes the ambiguity of the anchor-free head. Its simple and lightweight structure also makes it easy to extend to other tasks.
**Characteristics:**
- Simple structure: only two heads are needed to predict object position and size, and time-consuming post-processing is removed.
- Short training time: with a DarkNet53 backbone, 8 V100 GPUs need only about 2 hours of training to reach a good result.
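As a rough illustration of the Gaussian-kernel sample generation mentioned above, the center-heatmap target for a single object can be produced as in the hedged NumPy sketch below. The actual target generation in ppdet also builds box-regression targets and per-pixel weights, and the `alpha` scale factor here is an assumption for the example.
```python
import numpy as np

def draw_gaussian_target(heatmap, center, box_h, box_w, alpha=0.54):
    """Splash an elliptical 2D Gaussian for one object onto `heatmap` [H, W]."""
    H, W = heatmap.shape
    cx, cy = int(center[0]), int(center[1])
    sigma_x = max(alpha * box_w / 6.0, 1e-6)
    sigma_y = max(alpha * box_h / 6.0, 1e-6)
    ys, xs = np.ogrid[:H, :W]
    gauss = np.exp(-((xs - cx) ** 2 / (2 * sigma_x ** 2) +
                     (ys - cy) ** 2 / (2 * sigma_y ** 2)))
    # Take the element-wise maximum so nearby objects do not erase each other.
    np.maximum(heatmap, gauss, out=heatmap)
    return heatmap
```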
## Model Zoo
| Backbone | Network type | Number of images per GPU | Learning rate strategy | Inference time (fps) | Box AP | Download | Configuration File |
| :-------- | :----------- | :----------------------: | :--------------------: | :-----------------: | :----: | :------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------: |
| DarkNet53 | TTFNet | 12 | 1x | ---- | 33.5 | [link](https://paddledet.bj.bcebos.com/models/ttfnet_darknet53_1x_coco.pdparams) | [Configuration File](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ttfnet/ttfnet_darknet53_1x_coco.yml) |
# 2. PAFNet
## Introduction
PAFNet (Paddle Anchor Free) is PaddleDetection's optimized model based on TTFNet. Its accuracy reaches the SOTA level among anchor-free detectors, and it also provides a lightweight mobile model, PAFNet-Lite.
The PAFNet series optimizes the TTFNet model in the following aspects:
- [CutMix](https://arxiv.org/abs/1905.04899)
- Better backbone network: ResNet50vd-DCN
- Larger training batch size: 8 GPUs, each GPU batch size=18
- Synchronized Batch Normalization
- [Deformable Convolution](https://arxiv.org/abs/1703.06211)
- [Exponential Moving Average](https://www.investopedia.com/terms/e/ema.asp)
- Better pretraining model
## Model Zoo
| Backbone | Net type | Number of images per GPU | Learning rate strategy | Inference time (fps) | Box AP | Download | Configuration File |
| :--------- | :------- | :----------------------: | :--------------------: | :-----------------: | :----: | :---------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: |
| ResNet50vd | PAFNet | 18 | 10x | ---- | 39.8 | [link](https://paddledet.bj.bcebos.com/models/pafnet_10x_coco.pdparams) | [Configuration File](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ttfnet/pafnet_10x_coco.yml) |
### PAFNet-Lite
| Backbone | Net type | Number of images per GPU | Learning rate strategy | Box AP | Kirin 990 latency (ms) | Model size (MB) | Download | Configuration File |
| :---------- | :---------- | :----------------------: | :--------------------: | :----: | :-------------------: | :---------: | :---------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------: |
| MobileNetv3 | PAFNet-Lite | 12 | 20x | 23.9 | 26.00 | 14 | [link](https://paddledet.bj.bcebos.com/models/pafnet_lite_mobilenet_v3_20x_coco.pdparams) | [Configuration File](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ttfnet/pafnet_lite_mobilenet_v3_20x_coco.yml) |
**Attention:** Due to the overall upgrade of the dynamic graph framework, the PAFNet weights published by PaddleDetection need to be evaluated with the `--bias` flag, for example
```bash
# evaluate with the weights published by PaddleDetection
CUDA_VISIBLE_DEVICES=0 python tools/eval.py -c configs/ppyolo/pafnet_10x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/pafnet_10x_coco.pdparams --bias
```
## Citations
```
@article{liu2019training,
title = {Training-Time-Friendly Network for Real-Time Object Detection},
  author = {Zili Liu and Tu Zheng and Guodong Xu and Zheng Yang and Haifeng Liu and Deng Cai},
journal = {arXiv preprint arXiv:1909.00700},
year = {2019}
}
```
| PaddleDetection/configs/ttfnet/README_en.md/0 | {
"file_path": "PaddleDetection/configs/ttfnet/README_en.md",
"repo_id": "PaddleDetection",
"token_count": 1925
} | 49 |
epoch: 12
LearningRate:
base_lr: 0.0001
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones: [9, 11]
- !LinearWarmup
start_factor: 0.001
steps: 1000
OptimizerBuilder:
optimizer:
type: AdamWDL
betas: [0.9, 0.999]
layer_decay: 0.75
weight_decay: 0.02
num_layers: 12
filter_bias_and_bn: True
skip_decay_names: ['pos_embed', 'cls_token']
set_param_lr_func: 'layerwise_lr_decay'
| PaddleDetection/configs/vitdet/_base_/optimizer_base_1x.yml/0 | {
"file_path": "PaddleDetection/configs/vitdet/_base_/optimizer_base_1x.yml",
"repo_id": "PaddleDetection",
"token_count": 209
} | 50 |
epoch: 40
LearningRate:
base_lr: 0.0001
schedulers:
- name: PiecewiseDecay
gamma: 0.1
milestones:
- 32
- 36
- name: LinearWarmup
start_factor: 0.3333333333333333
steps: 100
OptimizerBuilder:
optimizer:
momentum: 0.9
type: Momentum
regularizer:
factor: 0.0005
type: L2
| PaddleDetection/configs/yolov3/_base_/optimizer_40e.yml/0 | {
"file_path": "PaddleDetection/configs/yolov3/_base_/optimizer_40e.yml",
"repo_id": "PaddleDetection",
"token_count": 143
} | 51 |
_BASE_: [
'../datasets/voc.yml',
'../runtime.yml',
'_base_/optimizer_270e.yml',
'_base_/yolov3_mobilenet_v1.yml',
'_base_/yolov3_reader.yml',
]
snapshot_epoch: 5
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV1_ssld_pretrained.pdparams
weights: output/yolov3_mobilenet_v1_ssld_270e_voc/model_final
# set collate_batch to false because ground-truth info is needed
# on voc dataset and should not collate data in batch when batch size
# is larger than 1.
EvalReader:
collate_batch: false
LearningRate:
base_lr: 0.001
schedulers:
- !PiecewiseDecay
gamma: 0.1
milestones:
- 216
- 243
- !LinearWarmup
start_factor: 0.
steps: 1000
| PaddleDetection/configs/yolov3/yolov3_mobilenet_v1_ssld_270e_voc.yml/0 | {
"file_path": "PaddleDetection/configs/yolov3/yolov3_mobilenet_v1_ssld_270e_voc.yml",
"repo_id": "PaddleDetection",
"token_count": 300
} | 52 |
# PaddleDetection Predict deployment
PaddleDetection provides multiple deployment options based on Paddle Inference, Paddle Serving and Paddle-Lite, supports server, mobile and embedded platforms, and offers complete Python and C++ deployment solutions.
## Deployment Modes Supported by PaddleDetection
| Form             | Language | Tutorial | Device/Platform           |
| ---------------- | -------- | -------- | ------------------------- |
| Paddle Inference | Python   | Complete | Linux (ARM/X86), Windows  |
| Paddle Inference | C++      | Complete | Linux (ARM/X86), Windows  |
| Paddle Serving   | Python   | Complete | Linux (ARM/X86), Windows  |
| Paddle-Lite      | C++      | Complete | Android, iOS, FPGA, RK... |
## 1. Paddle Inference Deployment
### 1.1 Export the model
Use the `tools/export_model.py` script to export the model and the configuration file used during deployment. The configuration file is named `infer_cfg.yml`. The export command is as follows:
```bash
# Export the YOLOv3 model
python tools/export_model.py -c configs/yolov3/yolov3_mobilenet_v1_roadsign.yml -o weights=output/yolov3_mobilenet_v1_roadsign/best_model.pdparams
```
The prediction model will be exported to the `output_inference/yolov3_mobilenet_v1_roadsign` directory as `infer_cfg.yml`, `model.pdiparams`, `model.pdiparams.info` and `model.pdmodel`. For details on model export, please refer to the documentation [Tutorial on Paddle Detection MODEL EXPORT](./EXPORT_MODEL_en.md).
### 1.2 Use Paddle Inference to Make Predictions
* Python deployment supports `CPU`, `GPU` and `XPU` environments, Windows and Linux systems, and NV Jetson embedded devices. See the documentation [Python Deployment](python/README.md)
* C++ deployment supports `CPU`, `GPU` and `XPU` environments, Windows and Linux systems, and NV Jetson embedded devices. See the documentation [C++ deployment](cpp/README.md)
* PaddleDetection supports TensorRT acceleration. Please refer to the documentation [TensorRT Predictive Deployment Tutorial](TENSOR_RT.md)
**Attention:** The Paddle inference library version must be >= 2.1, and batch_size > 1 is only supported for YOLOv3 and PP-YOLO.
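For reference, a minimal Python sketch of loading an exported model with the Paddle Inference API is shown below. The input names (`image`, `scale_factor`) and the dummy preprocessing are assumptions that depend on the exported model's `infer_cfg.yml`; the full pipeline lives in `deploy/python/infer.py`.
```python
import numpy as np
from paddle.inference import Config, create_predictor

# Paths are placeholders for an exported model directory.
config = Config("output_inference/yolov3_mobilenet_v1_roadsign/model.pdmodel",
                "output_inference/yolov3_mobilenet_v1_roadsign/model.pdiparams")
config.enable_use_gpu(200, 0)  # or config.disable_gpu() for CPU
predictor = create_predictor(config)

# Dummy preprocessed input; real code resizes/normalizes per infer_cfg.yml.
feeds = {
    "image": np.random.rand(1, 3, 608, 608).astype("float32"),
    "scale_factor": np.array([[1.0, 1.0]], dtype="float32"),
}
for name in predictor.get_input_names():
    handle = predictor.get_input_handle(name)
    handle.reshape(list(feeds[name].shape))
    handle.copy_from_cpu(feeds[name])
predictor.run()
boxes = predictor.get_output_handle(predictor.get_output_names()[0]).copy_to_cpu()
print(boxes.shape)  # typically [num_boxes, 6]: class, score, x1, y1, x2, y2
```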
## 2. Paddle Serving Deployment
### 2.1 Export the model
If you want to export the model in `PaddleServing` format, set `export_serving_model=True`:
```bash
python tools/export_model.py -c configs/yolov3/yolov3_mobilenet_v1_roadsign.yml -o weights=output/yolov3_mobilenet_v1_roadsign/best_model.pdparams --export_serving_model=True
```
The prediction model will be exported to the `output_inference/yolov3_darknet53_270e_coco` directory as `infer_cfg.yml`, `model.pdiparams`, `model.pdiparams.info`, `model.pdmodel`, together with the `serving_client/` and `serving_server/` folders.
For details on model export, please refer to the documentation [Tutorial on Paddle Detection MODEL EXPORT](./EXPORT_MODEL_en.md).
### 2.2 Use Paddle Serving to make predictions
* [Install PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README.md#installation)
* [Use PaddleServing](./serving/README.md)
## 3. Paddle-Lite Deployment
- [Deploy the PaddleDetection model using PaddleLite](./lite/README.md)
- For details, please refer to [Paddle-Lite-Demo](https://github.com/PaddlePaddle/Paddle-Lite-Demo) deployment. For more information, please refer to [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite)
## 4. Third-Party Engine Deployment (MNN, NCNN, OpenVINO)
- Third-party engine deployment is demonstrated with PicoDet and TinyPose; other models follow the same procedure.
- Suggestions for TinyPose: OpenVINO is recommended for Intel CPUs, Paddle Inference for NVIDIA GPUs, and Paddle-Lite or MNN for ARM/Android devices.
| Third_Engine | MNN | NCNN | OPENVINO |
| ------------ | ------------------------------------------------------ | -------------------------------------------------- | ------------------------------------------------------------ |
| PicoDet | [PicoDet_MNN](./third_engine/demo_mnn/README.md) | [PicoDet_NCNN](./third_engine/demo_ncnn/README.md) | [PicoDet_OPENVINO](./third_engine/demo_openvino/README.md) |
| TinyPose | [TinyPose_MNN](./third_engine/demo_mnn_kpts/README.md) | - | [TinyPose_OPENVINO](./third_engine/demo_openvino_kpts/README.md) |
## 5. Benchmark Test
- Using the exported model, run the Benchmark batch test script:
```shell
sh deploy/benchmark/benchmark.sh {model_dir} {model_name}
```
**Attention:** For quantized models, please use the `deploy/benchmark/benchmark_quant.sh` script.
- Export the test result log to Excel:
```shell
python deploy/benchmark/log_parser_excel.py --log_path=./output_pipeline --output_name=benchmark_excel.xlsx
```
## 6. FAQ
- 1. Can models trained with `Paddle 1.8.4` be deployed with `Paddle 2.0`?
Paddle 2.0 is compatible with Paddle 1.8.4, so this is fine. However, some models (such as SOLOv2) use new operators introduced in Paddle 2.0, so this does not apply to them.
- 2. When compiling on Windows, the prediction library is built with VS2015; is it a problem to use VS2017 or VS2019?
For compatibility issues with VS, please refer to: [C++ Visual Studio 2015, 2017 and 2019 binary compatibility](https://docs.microsoft.com/zh-cn/cpp/porting/binary-compat-2015-2017?view=msvc-160)
- 3. Does cuDNN 8.0.4 leak memory during continuous prediction?
QA tests show that the cuDNN 8 series has memory-leak problems during continuous prediction, and cuDNN 8 performs worse than cuDNN 7. CUDA + cuDNN 7.6.4 is recommended for deployment.
| PaddleDetection/deploy/README_en.md/0 | {
"file_path": "PaddleDetection/deploy/README_en.md",
"repo_id": "PaddleDetection",
"token_count": 1957
} | 53 |