# File: autotrain-advanced-main/src/autotrain/__init__.py
import os
os.environ['BITSANDBYTES_NOWELCOME'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
import warnings
import torch._dynamo
from autotrain.logging import Logger
torch._dynamo.config.suppress_errors = True
warnings.filterwarnings('ignore', category=UserWarning, module='tensorflow')
warnings.filterwarnings('ignore', category=UserWarning, module='transformers')
warnings.filterwarnings('ignore', category=UserWarning, module='peft')
warnings.filterwarnings('ignore', category=UserWarning, module='accelerate')
warnings.filterwarnings('ignore', category=UserWarning, module='datasets')
warnings.filterwarnings('ignore', category=FutureWarning, module='accelerate')
warnings.filterwarnings('ignore', category=UserWarning, module='huggingface_hub')
logger = Logger().get_logger()
__version__ = '0.8.19'
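# Returns True when running inside Google Colab (detected by attempting the google.colab import).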
def is_colab():
try:
import google.colab
return True
except ImportError:
return False
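# Unsloth is an optional dependency; if it cannot be imported, log a warning and continue without it.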
def is_unsloth_available():
try:
from unsloth import FastLanguageModel
return True
except Exception as e:
logger.warning('Unsloth not available, continuing without it')
logger.warning(e)
return False
# File: autotrain-advanced-main/src/autotrain/app/api_routes.py
import json
from typing import Any, Dict, List, Literal, Optional, Tuple, Union, get_type_hints
from fastapi import APIRouter, Depends, HTTPException, Request, status
from fastapi.responses import JSONResponse
from huggingface_hub import HfApi
from pydantic import BaseModel, create_model, model_validator
from autotrain import __version__, logger
from autotrain.app.params import HIDDEN_PARAMS, PARAMS, AppParams
from autotrain.app.utils import token_verification
from autotrain.project import AutoTrainProject
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.image_regression.params import ImageRegressionParams
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.trainers.text_regression.params import TextRegressionParams
from autotrain.trainers.token_classification.params import TokenClassificationParams
from autotrain.trainers.vlm.params import VLMTrainingParams
FIELDS_TO_EXCLUDE = HIDDEN_PARAMS + ['push_to_hub']
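# Build an API-facing pydantic model from a trainer params class: copy each field's
# type hint and default while dropping hidden fields and any trainer-specific fields
# that do not apply to the given task variant.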
def create_api_base_model(base_class, class_name):
annotations = get_type_hints(base_class)
if class_name in ('LLMSFTTrainingParamsAPI', 'LLMRewardTrainingParamsAPI'):
more_hidden_params = ['model_ref', 'dpo_beta', 'add_eos_token', 'max_prompt_length', 'max_completion_length']
elif class_name == 'LLMORPOTrainingParamsAPI':
more_hidden_params = ['model_ref', 'dpo_beta', 'add_eos_token']
elif class_name == 'LLMDPOTrainingParamsAPI':
more_hidden_params = ['add_eos_token']
elif class_name == 'LLMGenericTrainingParamsAPI':
more_hidden_params = ['model_ref', 'dpo_beta', 'max_prompt_length', 'max_completion_length']
else:
more_hidden_params = []
_excluded = FIELDS_TO_EXCLUDE + more_hidden_params
new_fields: Dict[str, Tuple[Any, Any]] = {}
for (name, field) in base_class.__fields__.items():
if name not in _excluded:
field_type = annotations[name]
if field.default is not None:
field_default = field.default
elif field.default_factory is not None:
field_default = field.default_factory
else:
field_default = None
new_fields[name] = (field_type, field_default)
return create_model(class_name, **{key: (value[0], value[1]) for (key, value) in new_fields.items()}, __config__=type('Config', (), {'protected_namespaces': ()}))
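# Task-specific API parameter models generated from the trainer params classes above.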
LLMSFTTrainingParamsAPI = create_api_base_model(LLMTrainingParams, 'LLMSFTTrainingParamsAPI')
LLMDPOTrainingParamsAPI = create_api_base_model(LLMTrainingParams, 'LLMDPOTrainingParamsAPI')
LLMORPOTrainingParamsAPI = create_api_base_model(LLMTrainingParams, 'LLMORPOTrainingParamsAPI')
LLMGenericTrainingParamsAPI = create_api_base_model(LLMTrainingParams, 'LLMGenericTrainingParamsAPI')
LLMRewardTrainingParamsAPI = create_api_base_model(LLMTrainingParams, 'LLMRewardTrainingParamsAPI')
DreamBoothTrainingParamsAPI = create_api_base_model(DreamBoothTrainingParams, 'DreamBoothTrainingParamsAPI')
ImageClassificationParamsAPI = create_api_base_model(ImageClassificationParams, 'ImageClassificationParamsAPI')
Seq2SeqParamsAPI = create_api_base_model(Seq2SeqParams, 'Seq2SeqParamsAPI')
TabularClassificationParamsAPI = create_api_base_model(TabularParams, 'TabularClassificationParamsAPI')
TabularRegressionParamsAPI = create_api_base_model(TabularParams, 'TabularRegressionParamsAPI')
TextClassificationParamsAPI = create_api_base_model(TextClassificationParams, 'TextClassificationParamsAPI')
TextRegressionParamsAPI = create_api_base_model(TextRegressionParams, 'TextRegressionParamsAPI')
TokenClassificationParamsAPI = create_api_base_model(TokenClassificationParams, 'TokenClassificationParamsAPI')
SentenceTransformersParamsAPI = create_api_base_model(SentenceTransformersParams, 'SentenceTransformersParamsAPI')
ImageRegressionParamsAPI = create_api_base_model(ImageRegressionParams, 'ImageRegressionParamsAPI')
VLMTrainingParamsAPI = create_api_base_model(VLMTrainingParams, 'VLMTrainingParamsAPI')
ExtractiveQuestionAnsweringParamsAPI = create_api_base_model(ExtractiveQuestionAnsweringParams, 'ExtractiveQuestionAnsweringParamsAPI')
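# Column-mapping request schemas: each task declares which dataset columns the caller must provide.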
class LLMSFTColumnMapping(BaseModel):
text_column: str
class LLMDPOColumnMapping(BaseModel):
text_column: str
rejected_text_column: str
prompt_text_column: str
class LLMORPOColumnMapping(BaseModel):
text_column: str
rejected_text_column: str
prompt_text_column: str
class LLMGenericColumnMapping(BaseModel):
text_column: str
class LLMRewardColumnMapping(BaseModel):
text_column: str
rejected_text_column: str
class DreamBoothColumnMapping(BaseModel):
default: Optional[str] = None
class ImageClassificationColumnMapping(BaseModel):
image_column: str
target_column: str
class ImageRegressionColumnMapping(BaseModel):
image_column: str
target_column: str
class Seq2SeqColumnMapping(BaseModel):
text_column: str
target_column: str
class TabularClassificationColumnMapping(BaseModel):
id_column: str
target_columns: List[str]
class TabularRegressionColumnMapping(BaseModel):
id_column: str
target_columns: List[str]
class TextClassificationColumnMapping(BaseModel):
text_column: str
target_column: str
class TextRegressionColumnMapping(BaseModel):
text_column: str
target_column: str
class TokenClassificationColumnMapping(BaseModel):
tokens_column: str
tags_column: str
class STPairColumnMapping(BaseModel):
sentence1_column: str
sentence2_column: str
class STPairClassColumnMapping(BaseModel):
sentence1_column: str
sentence2_column: str
target_column: str
class STPairScoreColumnMapping(BaseModel):
sentence1_column: str
sentence2_column: str
target_column: str
class STTripletColumnMapping(BaseModel):
sentence1_column: str
sentence2_column: str
sentence3_column: str
class STQAColumnMapping(BaseModel):
sentence1_column: str
sentence2_column: str
class VLMColumnMapping(BaseModel):
image_column: str
text_column: str
prompt_text_column: str
class ExtractiveQuestionAnsweringColumnMapping(BaseModel):
text_column: str
question_column: str
answer_column: str
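# Request body for /api/create_project. The model validators below coerce the raw
# `params` and `column_mapping` dicts into their task-specific models and enforce
# the columns required by each task.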
class APICreateProjectModel(BaseModel):
project_name: str
task: Literal['llm:sft', 'llm:dpo', 'llm:orpo', 'llm:generic', 'llm:reward', 'st:pair', 'st:pair_class', 'st:pair_score', 'st:triplet', 'st:qa', 'image-classification', 'dreambooth', 'seq2seq', 'token-classification', 'text-classification', 'text-regression', 'tabular-classification', 'tabular-regression', 'image-regression', 'vlm:captioning', 'vlm:vqa', 'extractive-question-answering']
base_model: str
hardware: Literal['spaces-a10g-large', 'spaces-a10g-small', 'spaces-a100-large', 'spaces-t4-medium', 'spaces-t4-small', 'spaces-cpu-upgrade', 'spaces-cpu-basic', 'spaces-l4x1', 'spaces-l4x4', 'spaces-l40sx1', 'spaces-l40sx4', 'spaces-l40sx8', 'spaces-a10g-largex2', 'spaces-a10g-largex4']
params: Union[LLMSFTTrainingParamsAPI, LLMDPOTrainingParamsAPI, LLMORPOTrainingParamsAPI, LLMGenericTrainingParamsAPI, LLMRewardTrainingParamsAPI, SentenceTransformersParamsAPI, DreamBoothTrainingParamsAPI, ImageClassificationParamsAPI, Seq2SeqParamsAPI, TabularClassificationParamsAPI, TabularRegressionParamsAPI, TextClassificationParamsAPI, TextRegressionParamsAPI, TokenClassificationParamsAPI, ImageRegressionParamsAPI, VLMTrainingParamsAPI, ExtractiveQuestionAnsweringParamsAPI]
username: str
column_mapping: Optional[Union[LLMSFTColumnMapping, LLMDPOColumnMapping, LLMORPOColumnMapping, LLMGenericColumnMapping, LLMRewardColumnMapping, DreamBoothColumnMapping, ImageClassificationColumnMapping, Seq2SeqColumnMapping, TabularClassificationColumnMapping, TabularRegressionColumnMapping, TextClassificationColumnMapping, TextRegressionColumnMapping, TokenClassificationColumnMapping, STPairColumnMapping, STPairClassColumnMapping, STPairScoreColumnMapping, STTripletColumnMapping, STQAColumnMapping, ImageRegressionColumnMapping, VLMColumnMapping, ExtractiveQuestionAnsweringColumnMapping]] = None
hub_dataset: str
train_split: str
valid_split: Optional[str] = None
@model_validator(mode='before')
@classmethod
def validate_column_mapping(cls, values):
if values.get('task') == 'llm:sft':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for llm:sft')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for llm:sft')
values['column_mapping'] = LLMSFTColumnMapping(**values['column_mapping'])
elif values.get('task') == 'llm:dpo':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for llm:dpo')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for llm:dpo')
if not values.get('column_mapping').get('rejected_text_column'):
raise ValueError('rejected_text_column is required for llm:dpo')
if not values.get('column_mapping').get('prompt_text_column'):
raise ValueError('prompt_text_column is required for llm:dpo')
values['column_mapping'] = LLMDPOColumnMapping(**values['column_mapping'])
elif values.get('task') == 'llm:orpo':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for llm:orpo')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for llm:orpo')
if not values.get('column_mapping').get('rejected_text_column'):
raise ValueError('rejected_text_column is required for llm:orpo')
if not values.get('column_mapping').get('prompt_text_column'):
raise ValueError('prompt_text_column is required for llm:orpo')
values['column_mapping'] = LLMORPOColumnMapping(**values['column_mapping'])
elif values.get('task') == 'llm:generic':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for llm:generic')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for llm:generic')
values['column_mapping'] = LLMGenericColumnMapping(**values['column_mapping'])
elif values.get('task') == 'llm:reward':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for llm:reward')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for llm:reward')
if not values.get('column_mapping').get('rejected_text_column'):
raise ValueError('rejected_text_column is required for llm:reward')
values['column_mapping'] = LLMRewardColumnMapping(**values['column_mapping'])
elif values.get('task') == 'dreambooth':
if values.get('column_mapping'):
raise ValueError('column_mapping is not required for dreambooth')
elif values.get('task') == 'seq2seq':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for seq2seq')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for seq2seq')
if not values.get('column_mapping').get('target_column'):
raise ValueError('target_column is required for seq2seq')
values['column_mapping'] = Seq2SeqColumnMapping(**values['column_mapping'])
elif values.get('task') == 'image-classification':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for image-classification')
if not values.get('column_mapping').get('image_column'):
raise ValueError('image_column is required for image-classification')
if not values.get('column_mapping').get('target_column'):
raise ValueError('target_column is required for image-classification')
values['column_mapping'] = ImageClassificationColumnMapping(**values['column_mapping'])
elif values.get('task') == 'tabular-classification':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for tabular-classification')
if not values.get('column_mapping').get('id_column'):
raise ValueError('id_column is required for tabular-classification')
if not values.get('column_mapping').get('target_columns'):
raise ValueError('target_columns is required for tabular-classification')
values['column_mapping'] = TabularClassificationColumnMapping(**values['column_mapping'])
elif values.get('task') == 'tabular-regression':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for tabular-regression')
if not values.get('column_mapping').get('id_column'):
raise ValueError('id_column is required for tabular-regression')
if not values.get('column_mapping').get('target_columns'):
raise ValueError('target_columns is required for tabular-regression')
values['column_mapping'] = TabularRegressionColumnMapping(**values['column_mapping'])
elif values.get('task') == 'text-classification':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for text-classification')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for text-classification')
if not values.get('column_mapping').get('target_column'):
raise ValueError('target_column is required for text-classification')
values['column_mapping'] = TextClassificationColumnMapping(**values['column_mapping'])
elif values.get('task') == 'text-regression':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for text-regression')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for text-regression')
if not values.get('column_mapping').get('target_column'):
raise ValueError('target_column is required for text-regression')
values['column_mapping'] = TextRegressionColumnMapping(**values['column_mapping'])
elif values.get('task') == 'token-classification':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for token-classification')
if not values.get('column_mapping').get('tokens_column'):
raise ValueError('tokens_column is required for token-classification')
if not values.get('column_mapping').get('tags_column'):
raise ValueError('tags_column is required for token-classification')
values['column_mapping'] = TokenClassificationColumnMapping(**values['column_mapping'])
elif values.get('task') == 'st:pair':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for st:pair')
if not values.get('column_mapping').get('sentence1_column'):
raise ValueError('sentence1_column is required for st:pair')
if not values.get('column_mapping').get('sentence2_column'):
raise ValueError('sentence2_column is required for st:pair')
values['column_mapping'] = STPairColumnMapping(**values['column_mapping'])
elif values.get('task') == 'st:pair_class':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for st:pair_class')
if not values.get('column_mapping').get('sentence1_column'):
raise ValueError('sentence1_column is required for st:pair_class')
if not values.get('column_mapping').get('sentence2_column'):
raise ValueError('sentence2_column is required for st:pair_class')
if not values.get('column_mapping').get('target_column'):
raise ValueError('target_column is required for st:pair_class')
values['column_mapping'] = STPairClassColumnMapping(**values['column_mapping'])
elif values.get('task') == 'st:pair_score':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for st:pair_score')
if not values.get('column_mapping').get('sentence1_column'):
raise ValueError('sentence1_column is required for st:pair_score')
if not values.get('column_mapping').get('sentence2_column'):
raise ValueError('sentence2_column is required for st:pair_score')
if not values.get('column_mapping').get('target_column'):
raise ValueError('target_column is required for st:pair_score')
values['column_mapping'] = STPairScoreColumnMapping(**values['column_mapping'])
elif values.get('task') == 'st:triplet':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for st:triplet')
if not values.get('column_mapping').get('sentence1_column'):
raise ValueError('sentence1_column is required for st:triplet')
if not values.get('column_mapping').get('sentence2_column'):
raise ValueError('sentence2_column is required for st:triplet')
if not values.get('column_mapping').get('sentence3_column'):
raise ValueError('sentence3_column is required for st:triplet')
values['column_mapping'] = STTripletColumnMapping(**values['column_mapping'])
elif values.get('task') == 'st:qa':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for st:qa')
if not values.get('column_mapping').get('sentence1_column'):
raise ValueError('sentence1_column is required for st:qa')
if not values.get('column_mapping').get('sentence2_column'):
raise ValueError('sentence2_column is required for st:qa')
values['column_mapping'] = STQAColumnMapping(**values['column_mapping'])
elif values.get('task') == 'image-regression':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for image-regression')
if not values.get('column_mapping').get('image_column'):
raise ValueError('image_column is required for image-regression')
if not values.get('column_mapping').get('target_column'):
raise ValueError('target_column is required for image-regression')
values['column_mapping'] = ImageRegressionColumnMapping(**values['column_mapping'])
elif values.get('task') == 'vlm:captioning':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for vlm:captioning')
if not values.get('column_mapping').get('image_column'):
raise ValueError('image_column is required for vlm:captioning')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for vlm:captioning')
if not values.get('column_mapping').get('prompt_text_column'):
raise ValueError('prompt_text_column is required for vlm:captioning')
values['column_mapping'] = VLMColumnMapping(**values['column_mapping'])
elif values.get('task') == 'vlm:vqa':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for vlm:vqa')
if not values.get('column_mapping').get('image_column'):
raise ValueError('image_column is required for vlm:vqa')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for vlm:vqa')
if not values.get('column_mapping').get('prompt_text_column'):
raise ValueError('prompt_text_column is required for vlm:vqa')
values['column_mapping'] = VLMColumnMapping(**values['column_mapping'])
elif values.get('task') == 'extractive-question-answering':
if not values.get('column_mapping'):
raise ValueError('column_mapping is required for extractive-question-answering')
if not values.get('column_mapping').get('text_column'):
raise ValueError('text_column is required for extractive-question-answering')
if not values.get('column_mapping').get('question_column'):
raise ValueError('question_column is required for extractive-question-answering')
if not values.get('column_mapping').get('answer_column'):
raise ValueError('answer_column is required for extractive-question-answering')
values['column_mapping'] = ExtractiveQuestionAnsweringColumnMapping(**values['column_mapping'])
return values
@model_validator(mode='before')
@classmethod
def validate_params(cls, values):
if values.get('task') == 'llm:sft':
values['params'] = LLMSFTTrainingParamsAPI(**values['params'])
elif values.get('task') == 'llm:dpo':
values['params'] = LLMDPOTrainingParamsAPI(**values['params'])
elif values.get('task') == 'llm:orpo':
values['params'] = LLMORPOTrainingParamsAPI(**values['params'])
elif values.get('task') == 'llm:generic':
values['params'] = LLMGenericTrainingParamsAPI(**values['params'])
elif values.get('task') == 'llm:reward':
values['params'] = LLMRewardTrainingParamsAPI(**values['params'])
elif values.get('task') == 'dreambooth':
values['params'] = DreamBoothTrainingParamsAPI(**values['params'])
elif values.get('task') == 'seq2seq':
values['params'] = Seq2SeqParamsAPI(**values['params'])
elif values.get('task') == 'image-classification':
values['params'] = ImageClassificationParamsAPI(**values['params'])
elif values.get('task') == 'tabular-classification':
values['params'] = TabularClassificationParamsAPI(**values['params'])
elif values.get('task') == 'tabular-regression':
values['params'] = TabularRegressionParamsAPI(**values['params'])
elif values.get('task') == 'text-classification':
values['params'] = TextClassificationParamsAPI(**values['params'])
elif values.get('task') == 'text-regression':
values['params'] = TextRegressionParamsAPI(**values['params'])
elif values.get('task') == 'token-classification':
values['params'] = TokenClassificationParamsAPI(**values['params'])
elif values.get('task').startswith('st:'):
values['params'] = SentenceTransformersParamsAPI(**values['params'])
elif values.get('task') == 'image-regression':
values['params'] = ImageRegressionParamsAPI(**values['params'])
elif values.get('task').startswith('vlm:'):
values['params'] = VLMTrainingParamsAPI(**values['params'])
elif values.get('task') == 'extractive-question-answering':
values['params'] = ExtractiveQuestionAnsweringParamsAPI(**values['params'])
return values
api_router = APIRouter()
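# FastAPI dependency: extract the Bearer token from the Authorization header and
# verify it against the Hugging Face Hub; raises HTTP 401 if verification fails.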
def api_auth(request: Request):
authorization = request.headers.get('Authorization')
if authorization:
(schema, _, token) = authorization.partition(' ')
if schema.lower() == 'bearer':
token = token.strip()
try:
_ = token_verification(token=token)
return token
except Exception as e:
logger.error(f'Failed to verify token: {e}')
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail='Invalid or expired token: Bearer')
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail='Invalid or expired token')
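# Create and launch a training project on the requested backend.
# Illustrative request (values below are assumptions, not taken from the source):
#   POST /api/create_project
#   Authorization: Bearer <HF write token>
#   {"project_name": "my-project", "task": "llm:sft", "base_model": "<hub model id>",
#    "hardware": "spaces-a10g-large", "params": {}, "username": "<hub username>",
#    "column_mapping": {"text_column": "text"}, "hub_dataset": "<hub dataset id>",
#    "train_split": "train"}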
@api_router.post('/create_project', response_class=JSONResponse)
async def api_create_project(project: APICreateProjectModel, token: bool=Depends(api_auth)):
provided_params = project.params.model_dump()
if project.hardware == 'local':
hardware = 'local-ui'
else:
hardware = project.hardware
logger.info(provided_params)
logger.info(project.column_mapping)
task = project.task
if task.startswith('llm'):
params = PARAMS['llm']
trainer = task.split(':')[1]
params.update({'trainer': trainer})
elif task.startswith('st:'):
params = PARAMS['st']
trainer = task.split(':')[1]
params.update({'trainer': trainer})
elif task.startswith('vlm:'):
params = PARAMS['vlm']
trainer = task.split(':')[1]
params.update({'trainer': trainer})
elif task.startswith('tabular'):
params = PARAMS['tabular']
else:
params = PARAMS[task]
params.update(provided_params)
app_params = AppParams(job_params_json=json.dumps(params), token=token, project_name=project.project_name, username=project.username, task=task, data_path=project.hub_dataset, base_model=project.base_model, column_mapping=project.column_mapping.model_dump() if project.column_mapping else None, using_hub_dataset=True, train_split=project.train_split, valid_split=project.valid_split, api=True)
params = app_params.munge()
project = AutoTrainProject(params=params, backend=hardware)
job_id = project.create()
return {'message': 'Project created', 'job_id': job_id, 'success': True}
@api_router.get('/version', response_class=JSONResponse)
async def api_version():
return {'version': __version__}
@api_router.get('/logs', response_class=JSONResponse)
async def api_logs(job_id: str, token: bool=Depends(api_auth)):
return {'logs': 'Not implemented yet', 'success': False, 'message': 'Not implemented yet'}
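# Stopping a job pauses the Space running it; job_id is the Space repo id.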
@api_router.get('/stop_training', response_class=JSONResponse)
async def api_stop_training(job_id: str, token: bool=Depends(api_auth)):
hf_api = HfApi(token=token)
try:
hf_api.pause_space(repo_id=job_id)
except Exception as e:
logger.error(f'Failed to stop training: {e}')
return {'message': f'Failed to stop training for {job_id}: {e}', 'success': False}
return {'message': f'Training stopped for {job_id}', 'success': True}
# File: autotrain-advanced-main/src/autotrain/app/app.py
import os
from fastapi import FastAPI, Request
from fastapi.responses import RedirectResponse
from fastapi.staticfiles import StaticFiles
from autotrain import __version__, logger
from autotrain.app.api_routes import api_router
from autotrain.app.oauth import attach_oauth
from autotrain.app.ui_routes import ui_router
logger.info('Starting AutoTrain...')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
app = FastAPI()
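# OAuth is attached only when running inside a Hugging Face Space (SPACE_ID is set by the Space runtime).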
if 'SPACE_ID' in os.environ:
attach_oauth(app)
app.include_router(ui_router, prefix='/ui', include_in_schema=False)
app.include_router(api_router, prefix='/api')
static_path = os.path.join(BASE_DIR, 'static')
app.mount('/static', StaticFiles(directory=static_path), name='static')
logger.info(f'AutoTrain version: {__version__}')
logger.info('AutoTrain started successfully')
@app.get('/')
async def forward_to_ui(request: Request):
query_params = request.query_params
url = '/ui/'
if query_params:
url += f'?{query_params}'
return RedirectResponse(url=url)
# File: autotrain-advanced-main/src/autotrain/app/colab.py
import json
import os
import random
import string
import subprocess
import ipywidgets as widgets
import yaml
from autotrain.app.models import fetch_models
from autotrain.app.params import get_task_params
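# Generate project names of the form "autotrain-xxxxx-xxxxx".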
def generate_random_string():
prefix = 'autotrain'
part1 = ''.join(random.choices(string.ascii_lowercase + string.digits, k=5))
part2 = ''.join(random.choices(string.ascii_lowercase + string.digits, k=5))
return f'{prefix}-{part1}-{part2}'
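# Build the ipywidgets interface used in Colab: widgets for credentials, project,
# dataset and parameters, wired to callbacks that write a config.yml and run
# `autotrain --config config.yml` in a subprocess.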
def colab_app():
if not os.path.exists('data'):
os.makedirs('data')
MODEL_CHOICES = fetch_models()
TASK_NAMES = ['LLM SFT', 'LLM ORPO', 'LLM Generic', 'LLM DPO', 'LLM Reward', 'Text Classification', 'Text Regression', 'Sequence to Sequence', 'Token Classification', 'DreamBooth LoRA', 'Image Classification', 'Image Regression', 'Object Detection', 'Tabular Classification', 'Tabular Regression', 'ST Pair', 'ST Pair Classification', 'ST Pair Scoring', 'ST Triplet', 'ST Question Answering']
TASK_MAP = {'LLM SFT': 'llm:sft', 'LLM ORPO': 'llm:orpo', 'LLM Generic': 'llm:generic', 'LLM DPO': 'llm:dpo', 'LLM Reward': 'llm:reward', 'Text Classification': 'text-classification', 'Text Regression': 'text-regression', 'Sequence to Sequence': 'seq2seq', 'Token Classification': 'token-classification', 'DreamBooth LoRA': 'dreambooth', 'Image Classification': 'image-classification', 'Image Regression': 'image-regression', 'Object Detection': 'image-object-detection', 'Tabular Classification': 'tabular:classification', 'Tabular Regression': 'tabular:regression', 'ST Pair': 'st:pair', 'ST Pair Classification': 'st:pair_class', 'ST Pair Scoring': 'st:pair_score', 'ST Triplet': 'st:triplet', 'ST Question Answering': 'st:qa'}
def _get_params(task, param_type):
_p = get_task_params(task, param_type=param_type)
_p['push_to_hub'] = True
_p = json.dumps(_p, indent=4)
return _p
hf_token_label = widgets.HTML("
Hugging Face Write Token
")
hf_token = widgets.Password(value='', description='', disabled=False, layout=widgets.Layout(margin='0 0 0 0', width='200px'))
hf_user_label = widgets.HTML("Hugging Face Username")
hf_user = widgets.Text(value='', description='', disabled=False, layout=widgets.Layout(margin='0 0 0 0', width='200px'))
base_model_label = widgets.HTML("Base Model")
base_model = widgets.Text(value=MODEL_CHOICES['llm'][0], disabled=False, layout=widgets.Layout(width='420px'))
project_name_label = widgets.HTML("Project Name")
project_name = widgets.Text(value=generate_random_string(), description='', disabled=False, layout=widgets.Layout(margin='0 0 0 0', width='200px'))
task_dropdown_label = widgets.HTML("Task")
task_dropdown = widgets.Dropdown(options=TASK_NAMES, value=TASK_NAMES[0], description='', disabled=False, layout=widgets.Layout(margin='0 0 0 0', width='200px'))
dataset_path_label = widgets.HTML("Path")
dataset_path = widgets.Text(value='', description='', disabled=False, layout=widgets.Layout(margin='0 0 0 0', width='200px'))
train_split_label = widgets.HTML("Train Split")
train_split = widgets.Text(value='', description='', disabled=False, layout=widgets.Layout(margin='0 0 0 0', width='200px'))
valid_split_label = widgets.HTML("Valid Split")
valid_split = widgets.Text(value='', placeholder='optional', description='', disabled=False, layout=widgets.Layout(margin='0 0 0 0', width='200px'))
dataset_source_dropdown_label = widgets.HTML("Source")
dataset_source_dropdown = widgets.Dropdown(options=['Hugging Face Hub', 'Local'], value='Hugging Face Hub', description='', disabled=False, layout=widgets.Layout(margin='0 0 0 0', width='200px'))
col_mapping_label = widgets.HTML("Column Mapping")
col_mapping = widgets.Text(value='{"text": "text"}', placeholder='', description='', disabled=False, layout=widgets.Layout(margin='0 0 0 0', width='420px'))
parameters_dropdown = widgets.Dropdown(options=['Basic', 'Full'], value='Basic', description='', disabled=False, layout=widgets.Layout(width='400px'))
parameters = widgets.Textarea(value=_get_params('llm:sft', 'basic'), description='', disabled=False, layout=widgets.Layout(height='400px', width='400px'))
start_training_button = widgets.Button(description='Start Training', layout=widgets.Layout(width='1000px'), disabled=False, button_style='', tooltip='Click to start training', icon='check')
spacer = widgets.Box(layout=widgets.Layout(width='20px'))
title_hbox0 = widgets.HTML('Hugging Face Credentials')
title_hbox1 = widgets.HTML('Project Details')
title_hbox2 = widgets.HTML('Dataset Details')
title_hbox3 = widgets.HTML('Parameters')
hbox0 = widgets.HBox([widgets.VBox([hf_token_label, hf_token]), spacer, widgets.VBox([hf_user_label, hf_user])])
hbox1 = widgets.HBox([widgets.VBox([project_name_label, project_name]), spacer, widgets.VBox([task_dropdown_label, task_dropdown])])
hbox2_1 = widgets.HBox([widgets.VBox([dataset_source_dropdown_label, dataset_source_dropdown]), spacer, widgets.VBox([dataset_path_label, dataset_path])])
hbox2_2 = widgets.HBox([widgets.VBox([train_split_label, train_split]), spacer, widgets.VBox([valid_split_label, valid_split])])
hbox2_3 = widgets.HBox([widgets.VBox([col_mapping_label, col_mapping])])
hbox3 = widgets.VBox([parameters_dropdown, parameters])
vbox0 = widgets.VBox([title_hbox0, hbox0])
vbox1 = widgets.VBox([title_hbox1, base_model_label, base_model, hbox1])
vbox2 = widgets.VBox([title_hbox2, hbox2_1, hbox2_2, hbox2_3])
vbox3 = widgets.VBox([title_hbox3, hbox3])
left_column = widgets.VBox([vbox0, vbox1, vbox2], layout=widgets.Layout(width='500px'))
right_column = widgets.VBox([vbox3], layout=widgets.Layout(width='500px', align_items='flex-end'))
separator = widgets.HTML('')
_main_layout = widgets.HBox([left_column, separator, right_column])
main_layout = widgets.VBox([_main_layout, start_training_button])
def on_dataset_change(change):
if change['new'] == 'Local':
dataset_path.value = 'data/'
train_split.value = 'train'
valid_split.value = ''
else:
dataset_path.value = ''
train_split.value = ''
valid_split.value = ''
def update_parameters(*args):
task = TASK_MAP[task_dropdown.value]
param_type = parameters_dropdown.value.lower()
parameters.value = _get_params(task, param_type)
def update_col_mapping(*args):
task = TASK_MAP[task_dropdown.value]
if task in ['llm:sft', 'llm:generic']:
col_mapping.value = '{"text": "text"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = True
elif task in ['llm:dpo', 'llm:orpo']:
col_mapping.value = '{"prompt": "prompt", "text": "text", "rejected_text": "rejected_text"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = True
elif task == 'llm:reward':
col_mapping.value = '{"text": "text", "rejected_text": "rejected_text"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = True
elif task == 'text-classification':
col_mapping.value = '{"text": "text", "label": "target"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'text-regression':
col_mapping.value = '{"text": "text", "label": "target"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'token-classification':
col_mapping.value = '{"text": "tokens", "label": "tags"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'seq2seq':
col_mapping.value = '{"text": "text", "label": "target"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'dreambooth':
col_mapping.value = '{"image": "image"}'
dataset_source_dropdown.value = 'Local'
dataset_source_dropdown.disabled = True
valid_split.disabled = True
elif task == 'image-classification':
col_mapping.value = '{"image": "image", "label": "label"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'image-regression':
col_mapping.value = '{"image": "image", "label": "target"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'image-object-detection':
col_mapping.value = '{"image": "image", "objects": "objects"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'tabular:classification':
col_mapping.value = '{"id": "id", "label": ["target"]}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'tabular:regression':
col_mapping.value = '{"id": "id", "label": ["target"]}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'st:pair':
col_mapping.value = '{"sentence1": "anchor", "sentence2": "positive"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'st:pair_class':
col_mapping.value = '{"sentence1": "premise", "sentence2": "hypothesis", "target": "label"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'st:pair_score':
col_mapping.value = '{"sentence1": "sentence1", "sentence2": "sentence2", "target": "score"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'st:triplet':
col_mapping.value = '{"sentence1": "anchor", "sentence2": "positive", "sentence3": "negative"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == 'st:qa':
col_mapping.value = '{"sentence1": "query", "sentence1": "answer"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
else:
col_mapping.value = 'Enter column mapping...'
def update_base_model(*args):
if TASK_MAP[task_dropdown.value] == 'text-classification':
base_model.value = MODEL_CHOICES['text-classification'][0]
elif TASK_MAP[task_dropdown.value].startswith('llm'):
base_model.value = MODEL_CHOICES['llm'][0]
elif TASK_MAP[task_dropdown.value] == 'image-classification':
base_model.value = MODEL_CHOICES['image-classification'][0]
elif TASK_MAP[task_dropdown.value] == 'dreambooth':
base_model.value = MODEL_CHOICES['dreambooth'][0]
elif TASK_MAP[task_dropdown.value] == 'seq2seq':
base_model.value = MODEL_CHOICES['seq2seq'][0]
elif TASK_MAP[task_dropdown.value] == 'tabular:classification':
base_model.value = MODEL_CHOICES['tabular-classification'][0]
elif TASK_MAP[task_dropdown.value] == 'tabular:regression':
base_model.value = MODEL_CHOICES['tabular-regression'][0]
elif TASK_MAP[task_dropdown.value] == 'token-classification':
base_model.value = MODEL_CHOICES['token-classification'][0]
elif TASK_MAP[task_dropdown.value] == 'text-regression':
base_model.value = MODEL_CHOICES['text-regression'][0]
elif TASK_MAP[task_dropdown.value] == 'image-object-detection':
base_model.value = MODEL_CHOICES['image-object-detection'][0]
elif TASK_MAP[task_dropdown.value].startswith('st:'):
base_model.value = MODEL_CHOICES['sentence-transformers'][0]
else:
base_model.value = 'Enter base model...'
def start_training(b):
start_training_button.disabled = True
try:
print('Training is starting... Please wait!')
os.environ['HF_USERNAME'] = hf_user.value
os.environ['HF_TOKEN'] = hf_token.value
train_split_value = train_split.value.strip() if train_split.value.strip() != '' else None
valid_split_value = valid_split.value.strip() if valid_split.value.strip() != '' else None
params_val = json.loads(parameters.value)
if TASK_MAP[task_dropdown.value].startswith('llm') or TASK_MAP[task_dropdown.value].startswith('st:'):
params_val['trainer'] = TASK_MAP[task_dropdown.value].split(':')[1]
chat_template = params_val.get('chat_template')
if chat_template is not None:
params_val = {k: v for (k, v) in params_val.items() if k != 'chat_template'}
if TASK_MAP[task_dropdown.value] == 'dreambooth':
prompt = params_val.get('prompt')
if prompt is None:
raise ValueError('Prompt is required for DreamBooth task')
if not isinstance(prompt, str):
raise ValueError('Prompt should be a string')
params_val = {k: v for (k, v) in params_val.items() if k != 'prompt'}
else:
prompt = None
push_to_hub = params_val.get('push_to_hub', True)
if 'push_to_hub' in params_val:
params_val = {k: v for (k, v) in params_val.items() if k != 'push_to_hub'}
if TASK_MAP[task_dropdown.value] != 'dreambooth':
config = {'task': TASK_MAP[task_dropdown.value].split(':')[0], 'base_model': base_model.value, 'project_name': project_name.value, 'log': 'tensorboard', 'backend': 'local', 'data': {'path': dataset_path.value, 'train_split': train_split_value, 'valid_split': valid_split_value, 'column_mapping': json.loads(col_mapping.value)}, 'params': params_val, 'hub': {'username': '${{HF_USERNAME}}', 'token': '${{HF_TOKEN}}', 'push_to_hub': push_to_hub}}
if TASK_MAP[task_dropdown.value].startswith('llm'):
config['data']['chat_template'] = chat_template
if config['data']['chat_template'] == 'none':
config['data']['chat_template'] = None
else:
config = {'task': TASK_MAP[task_dropdown.value], 'base_model': base_model.value, 'project_name': project_name.value, 'backend': 'local', 'data': {'path': dataset_path.value, 'prompt': prompt}, 'params': params_val, 'hub': {'username': '${HF_USERNAME}', 'token': '${HF_TOKEN}', 'push_to_hub': push_to_hub}}
with open('config.yml', 'w') as f:
yaml.dump(config, f)
cmd = 'autotrain --config config.yml'
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
print(output.strip())
poll_res = process.poll()
if poll_res != 0:
start_training_button.disabled = False
raise Exception(f'Training failed with exit code: {poll_res}')
print('Training completed successfully!')
start_training_button.disabled = False
except Exception as e:
print('An error occurred while starting training!')
print(f'Error: {e}')
start_training_button.disabled = False
start_training_button.on_click(start_training)
dataset_source_dropdown.observe(on_dataset_change, names='value')
task_dropdown.observe(update_col_mapping, names='value')
task_dropdown.observe(update_parameters, names='value')
task_dropdown.observe(update_base_model, names='value')
parameters_dropdown.observe(update_parameters, names='value')
return main_layout
# File: autotrain-advanced-main/src/autotrain/app/db.py
import sqlite3
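# Minimal SQLite-backed registry of PIDs for locally running training jobs.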
class AutoTrainDB:
def __init__(self, db_path):
self.db_path = db_path
self.conn = sqlite3.connect(db_path)
self.c = self.conn.cursor()
self.create_jobs_table()
def create_jobs_table(self):
self.c.execute('CREATE TABLE IF NOT EXISTS jobs\n (id INTEGER PRIMARY KEY, pid INTEGER)')
self.conn.commit()
def add_job(self, pid):
sql = f'INSERT INTO jobs (pid) VALUES ({pid})'
self.c.execute(sql)
self.conn.commit()
def get_running_jobs(self):
self.c.execute('SELECT pid FROM jobs')
running_pids = self.c.fetchall()
running_pids = [pid[0] for pid in running_pids]
return running_pids
def delete_job(self, pid):
sql = f'DELETE FROM jobs WHERE pid={pid}'
self.c.execute(sql)
self.conn.commit()
# File: autotrain-advanced-main/src/autotrain/app/models.py
import collections
from huggingface_hub import list_models
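# Sort public Hub models by download count (descending) and return only their ids.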
def get_sorted_models(hub_models):
hub_models = [{'id': m.id, 'downloads': m.downloads} for m in hub_models if m.private is False]
hub_models = sorted(hub_models, key=lambda x: x['downloads'], reverse=True)
hub_models = [m['id'] for m in hub_models]
return hub_models
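# Each _fetch_* helper below merges the most-downloaded models with the models
# trending over the last 7 days, listing the trending models first.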
def _fetch_text_classification_models():
hub_models1 = list(list_models(task='fill-mask', library='transformers', sort='downloads', direction=-1, limit=100, full=False))
hub_models2 = list(list_models(task='text-classification', library='transformers', sort='downloads', direction=-1, limit=100, full=False))
hub_models = list(hub_models1) + list(hub_models2)
hub_models = get_sorted_models(hub_models)
trending_models = list(list_models(task='fill-mask', library='transformers', sort='likes7d', direction=-1, limit=30, full=False))
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_llm_models():
hub_models = list(list_models(task='text-generation', library='transformers', sort='downloads', direction=-1, limit=100, full=False))
hub_models = get_sorted_models(hub_models)
trending_models = list(list_models(task='text-generation', library='transformers', sort='likes7d', direction=-1, limit=30, full=False))
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_image_classification_models():
hub_models = list(list_models(task='image-classification', library='transformers', sort='downloads', direction=-1, limit=100, full=False))
hub_models = get_sorted_models(hub_models)
trending_models = list(list_models(task='image-classification', library='transformers', sort='likes7d', direction=-1, limit=30, full=False))
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_image_object_detection_models():
hub_models = list(list_models(task='object-detection', library='transformers', sort='downloads', direction=-1, limit=100, full=False, pipeline_tag='object-detection'))
hub_models = get_sorted_models(hub_models)
trending_models = list(list_models(task='object-detection', library='transformers', sort='likes7d', direction=-1, limit=30, full=False, pipeline_tag='object-detection'))
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_dreambooth_models():
hub_models1 = list(list_models(task='text-to-image', sort='downloads', direction=-1, limit=100, full=False, filter=['diffusers:StableDiffusionXLPipeline']))
hub_models2 = list(list_models(task='text-to-image', sort='downloads', direction=-1, limit=100, full=False, filter=['diffusers:StableDiffusionPipeline']))
hub_models = list(hub_models1) + list(hub_models2)
hub_models = get_sorted_models(hub_models)
trending_models1 = list(list_models(task='text-to-image', sort='likes7d', direction=-1, limit=30, full=False, filter=['diffusers:StableDiffusionXLPipeline']))
trending_models2 = list(list_models(task='text-to-image', sort='likes7d', direction=-1, limit=30, full=False, filter=['diffusers:StableDiffusionPipeline']))
trending_models = list(trending_models1) + list(trending_models2)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_seq2seq_models():
hub_models = list(list_models(task='text2text-generation', library='transformers', sort='downloads', direction=-1, limit=100, full=False))
hub_models = get_sorted_models(hub_models)
trending_models = list(list_models(task='text2text-generation', library='transformers', sort='likes7d', direction=-1, limit=30, full=False))
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_token_classification_models():
hub_models1 = list(list_models(task='fill-mask', library='transformers', sort='downloads', direction=-1, limit=100, full=False))
hub_models2 = list(list_models(task='token-classification', library='transformers', sort='downloads', direction=-1, limit=100, full=False))
hub_models = list(hub_models1) + list(hub_models2)
hub_models = get_sorted_models(hub_models)
trending_models = list(list_models(task='fill-mask', library='transformers', sort='likes7d', direction=-1, limit=30, full=False))
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_st_models():
hub_models1 = list(list_models(task='sentence-similarity', library='sentence-transformers', sort='downloads', direction=-1, limit=30, full=False))
hub_models2 = list(list_models(task='fill-mask', library='transformers', sort='downloads', direction=-1, limit=30, full=False))
hub_models = list(hub_models1) + list(hub_models2)
hub_models = get_sorted_models(hub_models)
trending_models = list(list_models(task='sentence-similarity', library='sentence-transformers', sort='likes7d', direction=-1, limit=30, full=False))
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_vlm_models():
hub_models1 = list(list_models(task='image-text-to-text', sort='downloads', direction=-1, limit=100, full=False, filter=['paligemma']))
hub_models2 = []
hub_models = list(hub_models1) + list(hub_models2)
hub_models = get_sorted_models(hub_models)
trending_models1 = list(list_models(task='image-text-to-text', sort='likes7d', direction=-1, limit=30, full=False, filter=['paligemma']))
trending_models2 = []
trending_models = list(trending_models1) + list(trending_models2)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def fetch_models():
_mc = collections.defaultdict(list)
_mc['text-classification'] = _fetch_text_classification_models()
_mc['llm'] = _fetch_llm_models()
_mc['image-classification'] = _fetch_image_classification_models()
_mc['image-regression'] = _fetch_image_classification_models()
_mc['dreambooth'] = _fetch_dreambooth_models()
_mc['seq2seq'] = _fetch_seq2seq_models()
_mc['token-classification'] = _fetch_token_classification_models()
_mc['text-regression'] = _fetch_text_classification_models()
_mc['image-object-detection'] = _fetch_image_object_detection_models()
_mc['sentence-transformers'] = _fetch_st_models()
_mc['vlm'] = _fetch_vlm_models()
_mc['extractive-qa'] = _fetch_text_classification_models()
_mc['tabular-classification'] = ['xgboost', 'random_forest', 'ridge', 'logistic_regression', 'svm', 'extra_trees', 'adaboost', 'decision_tree', 'knn']
_mc['tabular-regression'] = ['xgboost', 'random_forest', 'ridge', 'svm', 'extra_trees', 'adaboost', 'decision_tree', 'knn']
return _mc
# File: autotrain-advanced-main/src/autotrain/app/oauth.py
""""""
from __future__ import annotations
import hashlib
import os
import urllib.parse
import fastapi
from authlib.integrations.base_client.errors import MismatchingStateError
from authlib.integrations.starlette_client import OAuth
from fastapi.responses import RedirectResponse
from starlette.middleware.sessions import SessionMiddleware
OAUTH_CLIENT_ID = os.environ.get('OAUTH_CLIENT_ID')
OAUTH_CLIENT_SECRET = os.environ.get('OAUTH_CLIENT_SECRET')
OAUTH_SCOPES = os.environ.get('OAUTH_SCOPES')
OPENID_PROVIDER_URL = os.environ.get('OPENID_PROVIDER_URL')
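# These variables are injected by the Hugging Face Space runtime when
# `hf_oauth: true` is set in the Space metadata.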
def attach_oauth(app: fastapi.FastAPI):
_add_oauth_routes(app)
session_secret = OAUTH_CLIENT_SECRET + '-autotrain-v2'
app.add_middleware(SessionMiddleware, secret_key=hashlib.sha256(session_secret.encode()).hexdigest(), https_only=True, same_site='none')
def _add_oauth_routes(app: fastapi.FastAPI) -> None:
msg = "OAuth is required but {} environment variable is not set. Make sure you've enabled OAuth in your Space by setting `hf_oauth: true` in the Space metadata."
if OAUTH_CLIENT_ID is None:
raise ValueError(msg.format('OAUTH_CLIENT_ID'))
if OAUTH_CLIENT_SECRET is None:
raise ValueError(msg.format('OAUTH_CLIENT_SECRET'))
if OAUTH_SCOPES is None:
raise ValueError(msg.format('OAUTH_SCOPES'))
if OPENID_PROVIDER_URL is None:
raise ValueError(msg.format('OPENID_PROVIDER_URL'))
oauth = OAuth()
oauth.register(name='huggingface', client_id=OAUTH_CLIENT_ID, client_secret=OAUTH_CLIENT_SECRET, client_kwargs={'scope': OAUTH_SCOPES}, server_metadata_url=OPENID_PROVIDER_URL + '/.well-known/openid-configuration')
@app.get('/login/huggingface')
async def oauth_login(request: fastapi.Request):
redirect_uri = request.url_for('auth')
redirect_uri_as_str = str(redirect_uri)
if redirect_uri.netloc.endswith('.hf.space'):
redirect_uri_as_str = redirect_uri_as_str.replace('http://', 'https://')
return await oauth.huggingface.authorize_redirect(request, redirect_uri_as_str)
@app.get('/auth')
async def auth(request: fastapi.Request) -> RedirectResponse:
try:
oauth_info = await oauth.huggingface.authorize_access_token(request)
except MismatchingStateError:
login_uri = '/login/huggingface'
if '_target_url' in request.query_params:
login_uri += '?' + urllib.parse.urlencode({'_target_url': request.query_params['_target_url']})
for key in list(request.session.keys()):
if key.startswith('_state_huggingface'):
request.session.pop(key)
return RedirectResponse(login_uri)
request.session['oauth_info'] = oauth_info
return _redirect_to_target(request)
def _generate_redirect_uri(request: fastapi.Request) -> str:
if '_target_url' in request.query_params:
target = request.query_params['_target_url']
else:
target = '/?' + urllib.parse.urlencode(request.query_params)
redirect_uri = request.url_for('oauth_redirect_callback').include_query_params(_target_url=target)
redirect_uri_as_str = str(redirect_uri)
if redirect_uri.netloc.endswith('.hf.space'):
redirect_uri_as_str = redirect_uri_as_str.replace('http://', 'https://')
return redirect_uri_as_str
def _redirect_to_target(request: fastapi.Request, default_target: str='/') -> RedirectResponse:
target = request.query_params.get('_target_url', default_target)
return RedirectResponse(target)
# File: autotrain-advanced-main/src/autotrain/app/params.py
import json
from dataclasses import dataclass
from typing import Optional
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.image_regression.params import ImageRegressionParams
from autotrain.trainers.object_detection.params import ObjectDetectionParams
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.trainers.text_regression.params import TextRegressionParams
from autotrain.trainers.token_classification.params import TokenClassificationParams
from autotrain.trainers.vlm.params import VLMTrainingParams
HIDDEN_PARAMS = ['token', 'project_name', 'username', 'task', 'backend', 'train_split', 'valid_split', 'text_column', 'rejected_text_column', 'prompt_text_column', 'push_to_hub', 'trainer', 'model', 'data_path', 'image_path', 'class_image_path', 'revision', 'tokenizer', 'class_prompt', 'num_class_images', 'class_labels_conditioning', 'resume_from_checkpoint', 'dataloader_num_workers', 'allow_tf32', 'prior_generation_precision', 'local_rank', 'tokenizer_max_length', 'rank', 'xl', 'checkpoints_total_limit', 'validation_images', 'validation_epochs', 'num_validation_images', 'validation_prompt', 'sample_batch_size', 'log', 'image_column', 'target_column', 'id_column', 'target_columns', 'tokens_column', 'tags_column', 'objects_column', 'sentence1_column', 'sentence2_column', 'sentence3_column', 'question_column', 'answer_column']
PARAMS = {}
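# Default parameter presets per task family; API and UI requests override these
# with user-provided values.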
PARAMS['llm'] = LLMTrainingParams(target_modules='all-linear', log='tensorboard', mixed_precision='fp16', quantization='int4', peft=True, block_size=1024, epochs=3, padding='right', chat_template='none', max_completion_length=128).model_dump()
PARAMS['text-classification'] = TextClassificationParams(mixed_precision='fp16', log='tensorboard').model_dump()
PARAMS['st'] = SentenceTransformersParams(mixed_precision='fp16', log='tensorboard').model_dump()
PARAMS['image-classification'] = ImageClassificationParams(mixed_precision='fp16', log='tensorboard').model_dump()
PARAMS['image-object-detection'] = ObjectDetectionParams(mixed_precision='fp16', log='tensorboard').model_dump()
PARAMS['seq2seq'] = Seq2SeqParams(mixed_precision='fp16', target_modules='all-linear', log='tensorboard').model_dump()
PARAMS['tabular'] = TabularParams(categorical_imputer='most_frequent', numerical_imputer='median', numeric_scaler='robust').model_dump()
PARAMS['dreambooth'] = DreamBoothTrainingParams(prompt='', vae_model='', num_steps=500, disable_gradient_checkpointing=False, mixed_precision='fp16', batch_size=1, gradient_accumulation=4, resolution=1024, use_8bit_adam=False, xformers=False, train_text_encoder=False, lr=0.0001).model_dump()
PARAMS['token-classification'] = TokenClassificationParams(mixed_precision='fp16', log='tensorboard').model_dump()
PARAMS['text-regression'] = TextRegressionParams(mixed_precision='fp16', log='tensorboard').model_dump()
PARAMS['image-regression'] = ImageRegressionParams(mixed_precision='fp16', log='tensorboard').model_dump()
PARAMS['vlm'] = VLMTrainingParams(mixed_precision='fp16', target_modules='all-linear', log='tensorboard', quantization='int4', peft=True, epochs=3).model_dump()
PARAMS['extractive-qa'] = ExtractiveQuestionAnsweringParams(mixed_precision='fp16', log='tensorboard', max_seq_length=512, max_doc_stride=128).model_dump()
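# Bridges UI/API-level parameters to trainer-specific params classes; munge()
# dispatches to the appropriate _munge_params_* method based on the task.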
@dataclass
class AppParams:
job_params_json: str
token: str
project_name: str
username: str
task: str
data_path: str
base_model: str
column_mapping: dict
train_split: Optional[str] = None
valid_split: Optional[str] = None
using_hub_dataset: Optional[bool] = False
api: Optional[bool] = False
def __post_init__(self):
if self.using_hub_dataset and (not self.train_split):
raise ValueError('train_split is required when using a hub dataset')
def munge(self):
if self.task == 'text-classification':
return self._munge_params_text_clf()
elif self.task == 'seq2seq':
return self._munge_params_seq2seq()
elif self.task == 'image-classification':
return self._munge_params_img_clf()
elif self.task == 'image-object-detection':
return self._munge_params_img_obj_det()
elif self.task.startswith('tabular'):
return self._munge_params_tabular()
elif self.task == 'dreambooth':
return self._munge_params_dreambooth()
elif self.task.startswith('llm'):
return self._munge_params_llm()
elif self.task == 'token-classification':
return self._munge_params_token_clf()
elif self.task == 'text-regression':
return self._munge_params_text_reg()
elif self.task.startswith('st:'):
return self._munge_params_sent_transformers()
elif self.task == 'image-regression':
return self._munge_params_img_reg()
elif self.task.startswith('vlm'):
return self._munge_params_vlm()
elif self.task == 'extractive-qa':
return self._munge_params_extractive_qa()
else:
raise ValueError(f'Unknown task: {self.task}')
def _munge_common_params(self):
_params = json.loads(self.job_params_json)
_params['token'] = self.token
_params['project_name'] = f'{self.project_name}'
if 'push_to_hub' not in _params:
_params['push_to_hub'] = True
_params['data_path'] = self.data_path
_params['username'] = self.username
return _params
def _munge_params_sent_transformers(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['log'] = 'tensorboard'
if not self.using_hub_dataset:
_params['sentence1_column'] = 'autotrain_sentence1'
_params['sentence2_column'] = 'autotrain_sentence2'
_params['sentence3_column'] = 'autotrain_sentence3'
_params['target_column'] = 'autotrain_target'
_params['valid_split'] = 'validation'
else:
_params['sentence1_column'] = self.column_mapping.get('sentence1' if not self.api else 'sentence1_column', 'sentence1')
_params['sentence2_column'] = self.column_mapping.get('sentence2' if not self.api else 'sentence2_column', 'sentence2')
_params['sentence3_column'] = self.column_mapping.get('sentence3' if not self.api else 'sentence3_column', 'sentence3')
_params['target_column'] = self.column_mapping.get('target' if not self.api else 'target_column', 'target')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
trainer = self.task.split(':')[1]
_params['trainer'] = trainer.lower()
return SentenceTransformersParams(**_params)
def _munge_params_llm(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
if not self.using_hub_dataset:
_params['text_column'] = 'autotrain_text'
_params['prompt_text_column'] = 'autotrain_prompt'
_params['rejected_text_column'] = 'autotrain_rejected_text'
else:
_params['text_column'] = self.column_mapping.get('text' if not self.api else 'text_column', 'text')
_params['prompt_text_column'] = self.column_mapping.get('prompt' if not self.api else 'prompt_text_column', 'prompt')
_params['rejected_text_column'] = self.column_mapping.get('rejected_text' if not self.api else 'rejected_text_column', 'rejected_text')
_params['train_split'] = self.train_split
_params['log'] = 'tensorboard'
trainer = self.task.split(':')[1]
if trainer != 'generic':
_params['trainer'] = trainer.lower()
if 'quantization' in _params:
if _params['quantization'] in ('none', 'no'):
_params['quantization'] = None
return LLMTrainingParams(**_params)
def _munge_params_vlm(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
if not self.using_hub_dataset:
_params['text_column'] = 'autotrain_text'
_params['prompt_text_column'] = 'autotrain_prompt'
_params['image_column'] = 'autotrain_image'
_params['valid_split'] = 'validation'
else:
_params['text_column'] = self.column_mapping.get('text' if not self.api else 'text_column', 'text')
_params['prompt_text_column'] = self.column_mapping.get('prompt' if not self.api else 'prompt_text_column', 'prompt')
_params['image_column'] = self.column_mapping.get('image' if not self.api else 'image_column', 'image')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
_params['log'] = 'tensorboard'
trainer = self.task.split(':')[1]
_params['trainer'] = trainer.lower()
if 'quantization' in _params:
if _params['quantization'] in ('none', 'no'):
_params['quantization'] = None
return VLMTrainingParams(**_params)
def _munge_params_text_clf(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['log'] = 'tensorboard'
if not self.using_hub_dataset:
_params['text_column'] = 'autotrain_text'
_params['target_column'] = 'autotrain_label'
_params['valid_split'] = 'validation'
else:
_params['text_column'] = self.column_mapping.get('text' if not self.api else 'text_column', 'text')
_params['target_column'] = self.column_mapping.get('label' if not self.api else 'target_column', 'label')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
return TextClassificationParams(**_params)
def _munge_params_extractive_qa(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['log'] = 'tensorboard'
if not self.using_hub_dataset:
_params['text_column'] = 'autotrain_text'
_params['question_column'] = 'autotrain_question'
_params['answer_column'] = 'autotrain_answer'
_params['valid_split'] = 'validation'
else:
_params['text_column'] = self.column_mapping.get('text' if not self.api else 'text_column', 'text')
_params['question_column'] = self.column_mapping.get('question' if not self.api else 'question_column', 'question')
_params['answer_column'] = self.column_mapping.get('answer' if not self.api else 'answer_column', 'answer')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
return ExtractiveQuestionAnsweringParams(**_params)
def _munge_params_text_reg(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['log'] = 'tensorboard'
if not self.using_hub_dataset:
_params['text_column'] = 'autotrain_text'
_params['target_column'] = 'autotrain_label'
_params['valid_split'] = 'validation'
else:
_params['text_column'] = self.column_mapping.get('text' if not self.api else 'text_column', 'text')
_params['target_column'] = self.column_mapping.get('label' if not self.api else 'target_column', 'label')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
return TextRegressionParams(**_params)
def _munge_params_token_clf(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['log'] = 'tensorboard'
if not self.using_hub_dataset:
_params['tokens_column'] = 'autotrain_text'
_params['tags_column'] = 'autotrain_label'
_params['valid_split'] = 'validation'
else:
_params['tokens_column'] = self.column_mapping.get('text' if not self.api else 'tokens_column', 'text')
_params['tags_column'] = self.column_mapping.get('label' if not self.api else 'tags_column', 'label')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
return TokenClassificationParams(**_params)
def _munge_params_seq2seq(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['log'] = 'tensorboard'
if not self.using_hub_dataset:
_params['text_column'] = 'autotrain_text'
_params['target_column'] = 'autotrain_label'
_params['valid_split'] = 'validation'
else:
_params['text_column'] = self.column_mapping.get('text' if not self.api else 'text_column', 'text')
_params['target_column'] = self.column_mapping.get('label' if not self.api else 'target_column', 'label')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
return Seq2SeqParams(**_params)
def _munge_params_img_clf(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['log'] = 'tensorboard'
if not self.using_hub_dataset:
_params['image_column'] = 'autotrain_image'
_params['target_column'] = 'autotrain_label'
_params['valid_split'] = 'validation'
else:
_params['image_column'] = self.column_mapping.get('image' if not self.api else 'image_column', 'image')
_params['target_column'] = self.column_mapping.get('label' if not self.api else 'target_column', 'label')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
return ImageClassificationParams(**_params)
def _munge_params_img_reg(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['log'] = 'tensorboard'
if not self.using_hub_dataset:
_params['image_column'] = 'autotrain_image'
_params['target_column'] = 'autotrain_label'
_params['valid_split'] = 'validation'
else:
_params['image_column'] = self.column_mapping.get('image' if not self.api else 'image_column', 'image')
_params['target_column'] = self.column_mapping.get('target' if not self.api else 'target_column', 'target')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
return ImageRegressionParams(**_params)
def _munge_params_img_obj_det(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['log'] = 'tensorboard'
if not self.using_hub_dataset:
_params['image_column'] = 'autotrain_image'
_params['objects_column'] = 'autotrain_objects'
_params['valid_split'] = 'validation'
else:
_params['image_column'] = self.column_mapping.get('image' if not self.api else 'image_column', 'image')
_params['objects_column'] = self.column_mapping.get('objects' if not self.api else 'objects_column', 'objects')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
return ObjectDetectionParams(**_params)
def _munge_params_tabular(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
if not self.using_hub_dataset:
_params['id_column'] = 'autotrain_id'
_params['valid_split'] = 'validation'
if len(self.column_mapping['label']) == 1:
_params['target_columns'] = ['autotrain_label']
else:
_params['target_columns'] = ['autotrain_label_' + str(i) for i in range(len(self.column_mapping['label']))]
else:
_params['id_column'] = self.column_mapping.get('id' if not self.api else 'id_column', 'id')
_params['train_split'] = self.train_split
_params['valid_split'] = self.valid_split
_params['target_columns'] = self.column_mapping.get('label' if not self.api else 'target_columns', 'label')
if len(_params['categorical_imputer'].strip()) == 0 or _params['categorical_imputer'].lower() == 'none':
_params['categorical_imputer'] = None
if len(_params['numerical_imputer'].strip()) == 0 or _params['numerical_imputer'].lower() == 'none':
_params['numerical_imputer'] = None
if len(_params['numeric_scaler'].strip()) == 0 or _params['numeric_scaler'].lower() == 'none':
_params['numeric_scaler'] = None
if 'classification' in self.task:
_params['task'] = 'classification'
else:
_params['task'] = 'regression'
return TabularParams(**_params)
def _munge_params_dreambooth(self):
_params = self._munge_common_params()
_params['model'] = self.base_model
_params['image_path'] = self.data_path
if 'weight_decay' in _params:
_params['adam_weight_decay'] = _params['weight_decay']
_params.pop('weight_decay')
return DreamBoothTrainingParams(**_params)
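# Illustrative usage sketch (not part of the original source; all values are placeholders):
#   app_params = AppParams(
#       job_params_json=json.dumps({'epochs': 1}),
#       token='hf_xxx',
#       project_name='my-project',
#       username='my-username',
#       task='text-classification',
#       data_path='my-username/my-dataset',
#       base_model='bert-base-uncased',
#       column_mapping={'text': 'text', 'label': 'label'},
#       using_hub_dataset=True,
#       train_split='train',
#       valid_split=None,
#   )
#   params = app_params.munge()  # returns a TextClassificationParams instance for this task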
def get_task_params(task, param_type):
if task.startswith('llm'):
trainer = task.split(':')[1].lower()
task = task.split(':')[0].lower()
if task.startswith('st:'):
trainer = task.split(':')[1].lower()
task = task.split(':')[0].lower()
if task.startswith('vlm:'):
trainer = task.split(':')[1].lower()
task = task.split(':')[0].lower()
if task.startswith('tabular'):
task = 'tabular'
if task not in PARAMS:
return {}
task_params = PARAMS[task]
task_params = {k: v for (k, v) in task_params.items() if k not in HIDDEN_PARAMS}
if task == 'llm':
more_hidden_params = []
if trainer == 'sft':
more_hidden_params = ['model_ref', 'dpo_beta', 'add_eos_token', 'max_prompt_length', 'max_completion_length']
elif trainer == 'reward':
more_hidden_params = ['model_ref', 'dpo_beta', 'add_eos_token', 'max_prompt_length', 'max_completion_length', 'unsloth']
elif trainer == 'orpo':
more_hidden_params = ['model_ref', 'dpo_beta', 'add_eos_token', 'unsloth']
elif trainer == 'generic':
more_hidden_params = ['model_ref', 'dpo_beta', 'max_prompt_length', 'max_completion_length']
elif trainer == 'dpo':
more_hidden_params = ['add_eos_token', 'unsloth']
if param_type == 'basic':
more_hidden_params.extend(['padding', 'use_flash_attention_2', 'disable_gradient_checkpointing', 'logging_steps', 'eval_strategy', 'save_total_limit', 'auto_find_batch_size', 'warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'quantization', 'merge_adapter', 'lora_r', 'lora_alpha', 'lora_dropout', 'max_completion_length'])
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'text-classification' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'early_stopping_patience', 'early_stopping_threshold']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'extractive-qa' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'early_stopping_patience', 'early_stopping_threshold']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'st' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'early_stopping_patience', 'early_stopping_threshold']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'vlm' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'early_stopping_patience', 'early_stopping_threshold', 'quantization', 'lora_r', 'lora_alpha', 'lora_dropout']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'text-regression' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'early_stopping_patience', 'early_stopping_threshold']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'image-classification' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'early_stopping_patience', 'early_stopping_threshold']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'image-regression' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'early_stopping_patience', 'early_stopping_threshold']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'image-object-detection' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'early_stopping_patience', 'early_stopping_threshold']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'seq2seq' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'quantization', 'lora_r', 'lora_alpha', 'lora_dropout', 'target_modules', 'early_stopping_patience', 'early_stopping_threshold']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'token-classification' and param_type == 'basic':
more_hidden_params = ['warmup_ratio', 'weight_decay', 'max_grad_norm', 'seed', 'logging_steps', 'auto_find_batch_size', 'save_total_limit', 'eval_strategy', 'early_stopping_patience', 'early_stopping_threshold']
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
if task == 'dreambooth':
more_hidden_params = ['epochs', 'logging', 'bf16']
if param_type == 'basic':
more_hidden_params.extend(['prior_preservation', 'prior_loss_weight', 'seed', 'center_crop', 'train_text_encoder', 'disable_gradient_checkpointing', 'scale_lr', 'warmup_steps', 'num_cycles', 'lr_power', 'adam_beta1', 'adam_beta2', 'adam_weight_decay', 'adam_epsilon', 'max_grad_norm', 'pre_compute_text_embeddings', 'text_encoder_use_attention_mask'])
task_params = {k: v for (k, v) in task_params.items() if k not in more_hidden_params}
return task_params
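# Illustrative usage sketch (not part of the original source):
#   get_task_params('llm:sft', 'basic')         # SFT defaults with advanced keys hidden
#   get_task_params('text-classification', '')  # any non-'basic' param_type keeps the full
#                                               # set, minus HIDDEN_PARAMS
#   get_task_params('unknown-task', 'basic')    # {}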
# File: autotrain-advanced-main/src/autotrain/app/training_api.py
import asyncio
import os
import signal
import sys
from contextlib import asynccontextmanager
from fastapi import FastAPI
from autotrain import logger
from autotrain.app.db import AutoTrainDB
from autotrain.app.utils import get_running_jobs, kill_process_by_pid
from autotrain.utils import run_training
HF_TOKEN = os.environ.get('HF_TOKEN')
AUTOTRAIN_USERNAME = os.environ.get('AUTOTRAIN_USERNAME')
PROJECT_NAME = os.environ.get('PROJECT_NAME')
TASK_ID = int(os.environ.get('TASK_ID'))
PARAMS = os.environ.get('PARAMS')
DATA_PATH = os.environ.get('DATA_PATH')
MODEL = os.environ.get('MODEL')
DB = AutoTrainDB('autotrain.db')
def graceful_exit(signum, frame):
logger.info('SIGTERM received. Performing cleanup...')
sys.exit(0)
signal.signal(signal.SIGTERM, graceful_exit)
class BackgroundRunner:
async def run_main(self):
while True:
running_jobs = get_running_jobs(DB)
if not running_jobs:
logger.info('No running jobs found. Shutting down the server.')
kill_process_by_pid(os.getpid())
await asyncio.sleep(30)
runner = BackgroundRunner()
@asynccontextmanager
async def lifespan(app: FastAPI):
process_pid = run_training(params=PARAMS, task_id=TASK_ID)
logger.info(f'Started training with PID {process_pid}')
DB.add_job(process_pid)
task = asyncio.create_task(runner.run_main())
yield
task.cancel()
try:
await task
except asyncio.CancelledError:
logger.info('Background runner task cancelled.')
api = FastAPI(lifespan=lifespan)
logger.info(f'AUTOTRAIN_USERNAME: {AUTOTRAIN_USERNAME}')
logger.info(f'PROJECT_NAME: {PROJECT_NAME}')
logger.info(f'TASK_ID: {TASK_ID}')
logger.info(f'DATA_PATH: {DATA_PATH}')
logger.info(f'MODEL: {MODEL}')
@api.get('/')
async def root():
return 'Your model is being trained...'
@api.get('/health')
async def health():
return 'OK'
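# Illustrative note (not part of the original source): this module expects HF_TOKEN,
# AUTOTRAIN_USERNAME, PROJECT_NAME, TASK_ID, PARAMS, DATA_PATH and MODEL to be present in
# the environment before import. It is served with uvicorn, e.g. (the same command the
# NVCF backend uses):
#   python -m uvicorn autotrain.app.training_api:api --host 0.0.0.0 --port 7860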
# File: autotrain-advanced-main/src/autotrain/app/ui_routes.py
import json
import os
import signal
import sys
import time
from typing import List
import torch
from fastapi import APIRouter, Depends, File, Form, HTTPException, Query, Request, UploadFile, status
from fastapi.responses import HTMLResponse, JSONResponse, RedirectResponse
from fastapi.templating import Jinja2Templates
from huggingface_hub import repo_exists
from nvitop import Device
from autotrain import __version__, logger
from autotrain.app.db import AutoTrainDB
from autotrain.app.models import fetch_models
from autotrain.app.params import AppParams, get_task_params
from autotrain.app.utils import get_running_jobs, get_user_and_orgs, kill_process_by_pid, token_verification
from autotrain.dataset import AutoTrainDataset, AutoTrainDreamboothDataset, AutoTrainImageClassificationDataset, AutoTrainImageRegressionDataset, AutoTrainObjectDetectionDataset, AutoTrainVLMDataset
from autotrain.help import get_app_help
from autotrain.project import AutoTrainProject
logger.info('Starting AutoTrain...')
HF_TOKEN = os.environ.get('HF_TOKEN', None)
IS_RUNNING_IN_SPACE = 'SPACE_ID' in os.environ
ENABLE_NGC = int(os.environ.get('ENABLE_NGC', 0))
ENABLE_NVCF = int(os.environ.get('ENABLE_NVCF', 0))
AUTOTRAIN_LOCAL = int(os.environ.get('AUTOTRAIN_LOCAL', 1))
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DB = AutoTrainDB('autotrain.db')
MODEL_CHOICE = fetch_models()
ui_router = APIRouter()
templates_path = os.path.join(BASE_DIR, 'templates')
templates = Jinja2Templates(directory=templates_path)
UI_PARAMS = {'mixed_precision': {'type': 'dropdown', 'label': 'Mixed precision', 'options': ['fp16', 'bf16', 'none']}, 'optimizer': {'type': 'dropdown', 'label': 'Optimizer', 'options': ['adamw_torch', 'adamw', 'adam', 'sgd']}, 'scheduler': {'type': 'dropdown', 'label': 'Scheduler', 'options': ['linear', 'cosine', 'cosine_warmup', 'constant']}, 'eval_strategy': {'type': 'dropdown', 'label': 'Evaluation strategy', 'options': ['epoch', 'steps']}, 'logging_steps': {'type': 'number', 'label': 'Logging steps'}, 'save_total_limit': {'type': 'number', 'label': 'Save total limit'}, 'auto_find_batch_size': {'type': 'dropdown', 'label': 'Auto find batch size', 'options': [True, False]}, 'warmup_ratio': {'type': 'number', 'label': 'Warmup proportion'}, 'max_grad_norm': {'type': 'number', 'label': 'Max grad norm'}, 'weight_decay': {'type': 'number', 'label': 'Weight decay'}, 'epochs': {'type': 'number', 'label': 'Epochs'}, 'batch_size': {'type': 'number', 'label': 'Batch size'}, 'lr': {'type': 'number', 'label': 'Learning rate'}, 'seed': {'type': 'number', 'label': 'Seed'}, 'gradient_accumulation': {'type': 'number', 'label': 'Gradient accumulation'}, 'block_size': {'type': 'number', 'label': 'Block size'}, 'model_max_length': {'type': 'number', 'label': 'Model max length'}, 'add_eos_token': {'type': 'dropdown', 'label': 'Add EOS token', 'options': [True, False]}, 'disable_gradient_checkpointing': {'type': 'dropdown', 'label': 'Disable GC', 'options': [True, False]}, 'use_flash_attention_2': {'type': 'dropdown', 'label': 'Use flash attention', 'options': [True, False]}, 'log': {'type': 'dropdown', 'label': 'Logging', 'options': ['tensorboard', 'none']}, 'quantization': {'type': 'dropdown', 'label': 'Quantization', 'options': ['int4', 'int8', 'none']}, 'target_modules': {'type': 'string', 'label': 'Target modules'}, 'merge_adapter': {'type': 'dropdown', 'label': 'Merge adapter', 'options': [True, False]}, 'peft': {'type': 'dropdown', 'label': 'PEFT/LoRA', 'options': [True, False]}, 'lora_r': {'type': 'number', 'label': 'Lora r'}, 'lora_alpha': {'type': 'number', 'label': 'Lora alpha'}, 'lora_dropout': {'type': 'number', 'label': 'Lora dropout'}, 'model_ref': {'type': 'string', 'label': 'Reference model'}, 'dpo_beta': {'type': 'number', 'label': 'DPO beta'}, 'max_prompt_length': {'type': 'number', 'label': 'Prompt length'}, 'max_completion_length': {'type': 'number', 'label': 'Completion length'}, 'chat_template': {'type': 'dropdown', 'label': 'Chat template', 'options': ['none', 'zephyr', 'chatml', 'tokenizer']}, 'padding': {'type': 'dropdown', 'label': 'Padding side', 'options': ['right', 'left', 'none']}, 'max_seq_length': {'type': 'number', 'label': 'Max sequence length'}, 'early_stopping_patience': {'type': 'number', 'label': 'Early stopping patience'}, 'early_stopping_threshold': {'type': 'number', 'label': 'Early stopping threshold'}, 'max_target_length': {'type': 'number', 'label': 'Max target length'}, 'categorical_columns': {'type': 'string', 'label': 'Categorical columns'}, 'numerical_columns': {'type': 'string', 'label': 'Numerical columns'}, 'num_trials': {'type': 'number', 'label': 'Number of trials'}, 'time_limit': {'type': 'number', 'label': 'Time limit'}, 'categorical_imputer': {'type': 'dropdown', 'label': 'Categorical imputer', 'options': ['most_frequent', 'none']}, 'numerical_imputer': {'type': 'dropdown', 'label': 'Numerical imputer', 'options': ['mean', 'median', 'none']}, 'numeric_scaler': {'type': 'dropdown', 'label': 'Numeric scaler', 'options': ['standard', 'minmax', 'maxabs', 
'robust', 'none']}, 'vae_model': {'type': 'string', 'label': 'VAE model'}, 'prompt': {'type': 'string', 'label': 'Prompt'}, 'resolution': {'type': 'number', 'label': 'Resolution'}, 'num_steps': {'type': 'number', 'label': 'Number of steps'}, 'checkpointing_steps': {'type': 'number', 'label': 'Checkpointing steps'}, 'use_8bit_adam': {'type': 'dropdown', 'label': 'Use 8-bit Adam', 'options': [True, False]}, 'xformers': {'type': 'dropdown', 'label': 'xFormers', 'options': [True, False]}, 'image_square_size': {'type': 'number', 'label': 'Image square size'}, 'unsloth': {'type': 'dropdown', 'label': 'Unsloth', 'options': [True, False]}, 'max_doc_stride': {'type': 'number', 'label': 'Max doc stride'}}
def graceful_exit(signum, frame):
logger.info('SIGTERM received. Performing cleanup...')
sys.exit(0)
signal.signal(signal.SIGTERM, graceful_exit)
logger.info('AutoTrain started successfully')
def user_authentication(request: Request):
if HF_TOKEN is not None:
try:
_ = token_verification(token=os.environ.get('HF_TOKEN'))
return HF_TOKEN
except Exception as e:
logger.error(f'Failed to verify token: {e}')
if IS_RUNNING_IN_SPACE:
return templates.TemplateResponse('login.html', {'request': request})
else:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail='Invalid or expired token: HF_TOKEN')
if IS_RUNNING_IN_SPACE and 'oauth_info' in request.session:
try:
_ = token_verification(token=request.session['oauth_info']['access_token'])
return request.session['oauth_info']['access_token']
except Exception as e:
request.session.pop('oauth_info', None)
logger.error(f'Failed to verify token: {e}')
return templates.TemplateResponse('login.html', {'request': request})
if IS_RUNNING_IN_SPACE:
return templates.TemplateResponse('login.html', {'request': request})
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail='Invalid or expired token')
@ui_router.get('/', response_class=HTMLResponse)
async def load_index(request: Request, token: str=Depends(user_authentication)):
if os.environ.get('SPACE_ID') == 'autotrain-projects/autotrain-advanced':
return templates.TemplateResponse('duplicate.html', {'request': request})
try:
_users = get_user_and_orgs(user_token=token)
except Exception as e:
logger.error(f'Failed to get user and orgs: {e}')
if 'oauth_info' in request.session:
request.session.pop('oauth_info', None)
return templates.TemplateResponse('login.html', {'request': request})
context = {'request': request, 'valid_users': _users, 'enable_ngc': ENABLE_NGC, 'enable_nvcf': ENABLE_NVCF, 'enable_local': AUTOTRAIN_LOCAL, 'version': __version__, 'time': time.strftime('%Y-%m-%d %H:%M:%S')}
return templates.TemplateResponse('index.html', context)
@ui_router.get('/logout', response_class=HTMLResponse)
async def oauth_logout(request: Request, authenticated: bool=Depends(user_authentication)):
request.session.pop('oauth_info', None)
return RedirectResponse('/')
@ui_router.get('/params/{task}/{param_type}', response_class=JSONResponse)
async def fetch_params(task: str, param_type: str, authenticated: bool=Depends(user_authentication)):
logger.info(f'Task: {task}')
task_params = get_task_params(task, param_type)
if len(task_params) == 0:
return {'error': 'Task not found'}
ui_params = {}
for param in task_params:
if param in UI_PARAMS:
ui_params[param] = UI_PARAMS[param]
ui_params[param]['default'] = task_params[param]
else:
logger.info(f'Param {param} not found in UI_PARAMS')
ui_params = dict(sorted(ui_params.items(), key=lambda x: (x[1]['type'], x[1]['label'])))
return ui_params
@ui_router.get('/model_choices/{task}', response_class=JSONResponse)
async def fetch_model_choices(task: str, custom_models: str=Query(None), authenticated: bool=Depends(user_authentication)):
resp = []
if custom_models is not None:
custom_models = custom_models.split(',')
for custom_model in custom_models:
custom_model = custom_model.strip()
resp.append({'id': custom_model, 'name': custom_model})
if os.environ.get('AUTOTRAIN_CUSTOM_MODELS', None) is not None:
custom_models = os.environ.get('AUTOTRAIN_CUSTOM_MODELS')
custom_models = custom_models.split(',')
for custom_model in custom_models:
custom_model = custom_model.strip()
resp.append({'id': custom_model, 'name': custom_model})
if task == 'text-classification':
hub_models = MODEL_CHOICE['text-classification']
elif task.startswith('llm'):
hub_models = MODEL_CHOICE['llm']
elif task.startswith('st:'):
hub_models = MODEL_CHOICE['sentence-transformers']
elif task == 'image-classification':
hub_models = MODEL_CHOICE['image-classification']
elif task == 'dreambooth':
hub_models = MODEL_CHOICE['dreambooth']
elif task == 'seq2seq':
hub_models = MODEL_CHOICE['seq2seq']
elif task == 'tabular:classification':
hub_models = MODEL_CHOICE['tabular-classification']
elif task == 'tabular:regression':
hub_models = MODEL_CHOICE['tabular-regression']
elif task == 'token-classification':
hub_models = MODEL_CHOICE['token-classification']
elif task == 'text-regression':
hub_models = MODEL_CHOICE['text-regression']
elif task == 'image-object-detection':
hub_models = MODEL_CHOICE['image-object-detection']
elif task == 'image-regression':
hub_models = MODEL_CHOICE['image-regression']
elif task.startswith('vlm:'):
hub_models = MODEL_CHOICE['vlm']
elif task == 'extractive-qa':
hub_models = MODEL_CHOICE['extractive-qa']
else:
raise NotImplementedError
for hub_model in hub_models:
resp.append({'id': hub_model, 'name': hub_model})
return resp
@ui_router.post('/create_project', response_class=JSONResponse)
async def handle_form(project_name: str=Form(...), task: str=Form(...), base_model: str=Form(...), hardware: str=Form(...), params: str=Form(...), autotrain_user: str=Form(...), column_mapping: str=Form('{"default": "value"}'), data_files_training: List[UploadFile]=File(None), data_files_valid: List[UploadFile]=File(None), hub_dataset: str=Form(''), train_split: str=Form(''), valid_split: str=Form(''), token: str=Depends(user_authentication)):
train_split = train_split.strip()
if len(train_split) == 0:
train_split = None
valid_split = valid_split.strip()
if len(valid_split) == 0:
valid_split = None
logger.info(f'hardware: {hardware}')
if hardware == 'local-ui':
running_jobs = get_running_jobs(DB)
if running_jobs:
raise HTTPException(status_code=409, detail='Another job is already running. Please wait for it to finish.')
if repo_exists(f'{autotrain_user}/{project_name}', token=token):
raise HTTPException(status_code=409, detail=f'Project {project_name} already exists. Please choose a different name.')
params = json.loads(params)
for key in params:
if params[key] == 'null':
params[key] = None
column_mapping = json.loads(column_mapping)
training_files = [f.file for f in data_files_training if f.filename != ''] if data_files_training else []
validation_files = [f.file for f in data_files_valid if f.filename != ''] if data_files_valid else []
if len(training_files) > 0 and len(hub_dataset) > 0:
raise HTTPException(status_code=400, detail='Please either upload a dataset or choose a dataset from the Hugging Face Hub.')
if len(training_files) == 0 and len(hub_dataset) == 0:
raise HTTPException(status_code=400, detail='Please upload a dataset or choose a dataset from the Hugging Face Hub.')
if len(hub_dataset) > 0 and task == 'dreambooth':
raise HTTPException(status_code=400, detail='Dreambooth does not support Hugging Face Hub datasets.')
if len(hub_dataset) > 0:
if not train_split:
raise HTTPException(status_code=400, detail='Please enter a training split.')
if len(hub_dataset) == 0:
file_extension = os.path.splitext(data_files_training[0].filename)[1]
file_extension = file_extension[1:] if file_extension.startswith('.') else file_extension
if task == 'image-classification':
dset = AutoTrainImageClassificationDataset(train_data=training_files[0], token=token, project_name=project_name, username=autotrain_user, valid_data=validation_files[0] if validation_files else None, percent_valid=None, local=hardware.lower() == 'local-ui')
elif task == 'image-regression':
dset = AutoTrainImageRegressionDataset(train_data=training_files[0], token=token, project_name=project_name, username=autotrain_user, valid_data=validation_files[0] if validation_files else None, percent_valid=None, local=hardware.lower() == 'local-ui')
elif task == 'image-object-detection':
dset = AutoTrainObjectDetectionDataset(train_data=training_files[0], token=token, project_name=project_name, username=autotrain_user, valid_data=validation_files[0] if validation_files else None, percent_valid=None, local=hardware.lower() == 'local-ui')
elif task == 'dreambooth':
dset = AutoTrainDreamboothDataset(concept_images=data_files_training, concept_name=params['prompt'], token=token, project_name=project_name, username=autotrain_user, local=hardware.lower() == 'local-ui')
elif task.startswith('vlm:'):
dset = AutoTrainVLMDataset(train_data=training_files[0], token=token, project_name=project_name, username=autotrain_user, column_mapping=column_mapping, valid_data=validation_files[0] if validation_files else None, percent_valid=None, local=hardware.lower() == 'local-ui')
else:
if task.startswith('llm'):
dset_task = 'lm_training'
elif task.startswith('st:'):
dset_task = 'sentence_transformers'
elif task == 'text-classification':
dset_task = 'text_multi_class_classification'
elif task == 'text-regression':
dset_task = 'text_single_column_regression'
elif task == 'seq2seq':
dset_task = 'seq2seq'
elif task.startswith('tabular'):
if ',' in column_mapping['label']:
column_mapping['label'] = column_mapping['label'].split(',')
else:
column_mapping['label'] = [column_mapping['label']]
column_mapping['label'] = [col.strip() for col in column_mapping['label']]
subtask = task.split(':')[-1].lower()
if len(column_mapping['label']) > 1 and subtask == 'classification':
dset_task = 'tabular_multi_label_classification'
elif len(column_mapping['label']) == 1 and subtask == 'classification':
dset_task = 'tabular_multi_class_classification'
elif len(column_mapping['label']) > 1 and subtask == 'regression':
dset_task = 'tabular_multi_column_regression'
elif len(column_mapping['label']) == 1 and subtask == 'regression':
dset_task = 'tabular_single_column_regression'
else:
raise NotImplementedError
elif task == 'token-classification':
dset_task = 'text_token_classification'
elif task == 'extractive-qa':
dset_task = 'text_extractive_question_answering'
else:
raise NotImplementedError
logger.info(f'Task: {dset_task}')
logger.info(f'Column mapping: {column_mapping}')
dset_args = dict(train_data=training_files, task=dset_task, token=token, project_name=project_name, username=autotrain_user, column_mapping=column_mapping, valid_data=validation_files, percent_valid=None, local=hardware.lower() == 'local-ui', ext=file_extension)
if task in ('text-classification', 'token-classification', 'st:pair_class'):
dset_args['convert_to_class_label'] = True
dset = AutoTrainDataset(**dset_args)
data_path = dset.prepare()
else:
data_path = hub_dataset
app_params = AppParams(job_params_json=json.dumps(params), token=token, project_name=project_name, username=autotrain_user, task=task, data_path=data_path, base_model=base_model, column_mapping=column_mapping, using_hub_dataset=len(hub_dataset) > 0, train_split=None if len(hub_dataset) == 0 else train_split, valid_split=None if len(hub_dataset) == 0 else valid_split)
params = app_params.munge()
project = AutoTrainProject(params=params, backend=hardware)
job_id = project.create()
monitor_url = ''
if hardware == 'local-ui':
DB.add_job(job_id)
monitor_url = 'Monitor your job locally / in logs'
elif hardware.startswith('ep-'):
monitor_url = f'https://ui.endpoints.huggingface.co/{autotrain_user}/endpoints/{job_id}'
elif hardware.startswith('spaces-'):
monitor_url = f'https://hf.co/spaces/{job_id}'
else:
monitor_url = f'Success! Monitor your job in logs. Job ID: {job_id}'
return {'success': 'true', 'monitor_url': monitor_url}
@ui_router.get('/help/{element_id}', response_class=JSONResponse)
async def fetch_help(element_id: str, authenticated: bool=Depends(user_authentication)):
msg = get_app_help(element_id)
return {'message': msg}
@ui_router.get('/accelerators', response_class=JSONResponse)
async def available_accelerators(authenticated: bool=Depends(user_authentication)):
if AUTOTRAIN_LOCAL == 0:
return {'accelerators': 'Not available in cloud mode.'}
cuda_available = torch.cuda.is_available()
mps_available = torch.backends.mps.is_available()
if cuda_available:
num_gpus = torch.cuda.device_count()
elif mps_available:
num_gpus = 1
else:
num_gpus = 0
return {'accelerators': num_gpus}
@ui_router.get('/is_model_training', response_class=JSONResponse)
async def is_model_training(authenticated: bool=Depends(user_authentication)):
if AUTOTRAIN_LOCAL == 0:
return {'model_training': 'Not available in cloud mode.'}
running_jobs = get_running_jobs(DB)
if running_jobs:
return {'model_training': True, 'pids': running_jobs}
return {'model_training': False, 'pids': []}
@ui_router.get('/logs', response_class=JSONResponse)
async def fetch_logs(authenticated: bool=Depends(user_authentication)):
if not AUTOTRAIN_LOCAL:
return {'logs': 'Logs are only available in local mode.'}
log_file = 'autotrain.log'
with open(log_file, 'r', encoding='utf-8') as f:
logs = f.read()
if len(str(logs).strip()) == 0:
logs = 'No logs available.'
logs = logs.split('\n')
logs = logs[::-1]
logs = [log for log in logs if '/ui/' not in log and '/static/' not in log and ('nvidia-ml-py' not in log)]
cuda_available = torch.cuda.is_available()
if cuda_available:
devices = Device.all()
device_logs = []
for device in devices:
device_logs.append(f'Device {device.index}: {device.name()} - {device.memory_used_human()}/{device.memory_total_human()}')
device_logs.append('-----------------')
logs = device_logs + logs
return {'logs': logs}
@ui_router.get('/stop_training', response_class=JSONResponse)
async def stop_training(authenticated: bool=Depends(user_authentication)):
running_jobs = get_running_jobs(DB)
if running_jobs:
for _pid in running_jobs:
try:
kill_process_by_pid(_pid)
except Exception:
logger.info(f'Process {_pid} is already completed. Skipping...')
return {'success': True}
return {'success': False}
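# Illustrative note (not part of the original source): ui_router is an APIRouter that is
# mounted on a FastAPI application elsewhere in the package, e.g.:
#   app = FastAPI()
#   app.include_router(ui_router, prefix='/ui')
# The '/ui' prefix is an assumption here, consistent with the log filter above that drops
# '/ui/' request lines.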
# File: autotrain-advanced-main/src/autotrain/app/utils.py
import os
import signal
import sys
import psutil
import requests
from autotrain import config, logger
def graceful_exit(signum, frame):
logger.info('SIGTERM received. Performing cleanup...')
sys.exit(0)
signal.signal(signal.SIGTERM, graceful_exit)
def get_running_jobs(db):
running_jobs = db.get_running_jobs()
if running_jobs:
for _pid in running_jobs:
proc_status = get_process_status(_pid)
proc_status = proc_status.strip().lower()
if proc_status in ('completed', 'error', 'zombie'):
logger.info(f'Killing PID: {_pid}')
try:
kill_process_by_pid(_pid)
except Exception as e:
logger.info(f'Error while killing process: {e}')
logger.info(f'Process {_pid} is already completed. Skipping...')
db.delete_job(_pid)
running_jobs = db.get_running_jobs()
return running_jobs
def get_process_status(pid):
try:
process = psutil.Process(pid)
proc_status = process.status()
return proc_status
except psutil.NoSuchProcess:
logger.info(f'No process found with PID: {pid}')
return 'Completed'
def kill_process_by_pid(pid):
try:
os.kill(pid, signal.SIGTERM)
logger.info(f'Sent SIGTERM to process with PID {pid}')
except ProcessLookupError:
logger.error(f'No process found with PID {pid}')
except Exception as e:
logger.error(f'Failed to send SIGTERM to process with PID {pid}: {e}')
def token_verification(token):
if token.startswith('hf_oauth'):
_api_url = config.HF_API + '/oauth/userinfo'
_err_msg = '/oauth/userinfo'
else:
_api_url = config.HF_API + '/api/whoami-v2'
_err_msg = '/api/whoami-v2'
headers = {}
cookies = {}
if token.startswith('hf_'):
headers['Authorization'] = f'Bearer {token}'
else:
cookies = {'token': token}
try:
response = requests.get(_api_url, headers=headers, cookies=cookies, timeout=3)
except (requests.Timeout, ConnectionError) as err:
logger.error(f'Failed to request {_err_msg} - {repr(err)}')
raise Exception(f'Hugging Face Hub ({_err_msg}) is unreachable, please try again later.')
if response.status_code != 200:
logger.error(f'Failed to request {_err_msg} - {response.status_code}')
raise Exception(f'Invalid token ({_err_msg}). Please login with a write token.')
resp = response.json()
user_info = {}
if token.startswith('hf_oauth'):
user_info['id'] = resp['sub']
user_info['name'] = resp['preferred_username']
user_info['orgs'] = [resp['orgs'][k]['preferred_username'] for k in range(len(resp['orgs']))]
else:
user_info['id'] = resp['id']
user_info['name'] = resp['name']
user_info['orgs'] = [resp['orgs'][k]['name'] for k in range(len(resp['orgs']))]
return user_info
def get_user_and_orgs(user_token):
if user_token is None or len(user_token) == 0:
raise Exception('Invalid or missing token. Please login with a write token.')
user_info = token_verification(token=user_token)
username = user_info['name']
orgs = user_info['orgs']
who_is_training = [username] + orgs
return who_is_training
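# Illustrative usage sketch (token value is a placeholder, not from the original source):
#   user_info = token_verification(token='hf_xxx')
#   user_info['name']                       # username string
#   user_info['orgs']                       # list of organization names
#   get_user_and_orgs(user_token='hf_xxx')  # [username, *orgs]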
# File: autotrain-advanced-main/src/autotrain/backends/base.py
import json
from dataclasses import dataclass
from typing import Union
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
from autotrain.trainers.generic.params import GenericParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.image_regression.params import ImageRegressionParams
from autotrain.trainers.object_detection.params import ObjectDetectionParams
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.trainers.text_regression.params import TextRegressionParams
from autotrain.trainers.token_classification.params import TokenClassificationParams
from autotrain.trainers.vlm.params import VLMTrainingParams
AVAILABLE_HARDWARE = {'spaces-a10g-large': 'a10g-large', 'spaces-a10g-small': 'a10g-small', 'spaces-a100-large': 'a100-large', 'spaces-t4-medium': 't4-medium', 'spaces-t4-small': 't4-small', 'spaces-cpu-upgrade': 'cpu-upgrade', 'spaces-cpu-basic': 'cpu-basic', 'spaces-l4x1': 'l4x1', 'spaces-l4x4': 'l4x4', 'spaces-l40sx1': 'l40sx1', 'spaces-l40sx4': 'l40sx4', 'spaces-l40sx8': 'l40sx8', 'spaces-a10g-largex2': 'a10g-largex2', 'spaces-a10g-largex4': 'a10g-largex4', 'dgx-a100': 'dgxa100.80g.1.norm', 'dgx-2a100': 'dgxa100.80g.2.norm', 'dgx-4a100': 'dgxa100.80g.4.norm', 'dgx-8a100': 'dgxa100.80g.8.norm', 'ep-aws-useast1-s': 'aws_us-east-1_gpu_small_g4dn.xlarge', 'ep-aws-useast1-m': 'aws_us-east-1_gpu_medium_g5.2xlarge', 'ep-aws-useast1-l': 'aws_us-east-1_gpu_large_g4dn.12xlarge', 'ep-aws-useast1-xl': 'aws_us-east-1_gpu_xlarge_p4de', 'ep-aws-useast1-2xl': 'aws_us-east-1_gpu_2xlarge_p4de', 'ep-aws-useast1-4xl': 'aws_us-east-1_gpu_4xlarge_p4de', 'ep-aws-useast1-8xl': 'aws_us-east-1_gpu_8xlarge_p4de', 'nvcf-l40sx1': {'id': '67bb8939-c932-429a-a446-8ae898311856'}, 'nvcf-h100x1': {'id': '848348f8-a4e2-4242-bce9-6baa1bd70a66'}, 'nvcf-h100x2': {'id': 'fb006a89-451e-4d9c-82b5-33eff257e0bf'}, 'nvcf-h100x4': {'id': '21bae5af-87e5-4132-8fc0-bf3084e59a57'}, 'nvcf-h100x8': {'id': '6e0c2af6-5368-47e0-b15e-c070c2c92018'}, 'local-ui': 'local', 'local': 'local', 'local-cli': 'local'}
@dataclass
class BaseBackend:
params: Union[TextClassificationParams, ImageClassificationParams, LLMTrainingParams, GenericParams, TabularParams, DreamBoothTrainingParams, Seq2SeqParams, TokenClassificationParams, TextRegressionParams, ObjectDetectionParams, SentenceTransformersParams, ImageRegressionParams, VLMTrainingParams, ExtractiveQuestionAnsweringParams]
backend: str
def __post_init__(self):
self.username = None
if isinstance(self.params, GenericParams) and self.backend.startswith('local'):
raise ValueError('Local backend is not supported for GenericParams')
if self.backend.startswith('spaces-') or self.backend.startswith('ep-') or self.backend.startswith('ngc-') or self.backend.startswith('nvcf-'):
if self.params.username is not None:
self.username = self.params.username
else:
raise ValueError('Must provide username')
if isinstance(self.params, LLMTrainingParams):
self.task_id = 9
elif isinstance(self.params, TextClassificationParams):
self.task_id = 2
elif isinstance(self.params, TabularParams):
self.task_id = 26
elif isinstance(self.params, GenericParams):
self.task_id = 27
elif isinstance(self.params, DreamBoothTrainingParams):
self.task_id = 25
elif isinstance(self.params, Seq2SeqParams):
self.task_id = 28
elif isinstance(self.params, ImageClassificationParams):
self.task_id = 18
elif isinstance(self.params, TokenClassificationParams):
self.task_id = 4
elif isinstance(self.params, TextRegressionParams):
self.task_id = 10
elif isinstance(self.params, ObjectDetectionParams):
self.task_id = 29
elif isinstance(self.params, SentenceTransformersParams):
self.task_id = 30
elif isinstance(self.params, ImageRegressionParams):
self.task_id = 24
elif isinstance(self.params, VLMTrainingParams):
self.task_id = 31
elif isinstance(self.params, ExtractiveQuestionAnsweringParams):
self.task_id = 5
else:
raise NotImplementedError
self.available_hardware = AVAILABLE_HARDWARE
self.wait = False
if self.backend == 'local-ui':
self.wait = False
if self.backend in ('local', 'local-cli'):
self.wait = True
self.env_vars = {'HF_TOKEN': self.params.token, 'AUTOTRAIN_USERNAME': self.username, 'PROJECT_NAME': self.params.project_name, 'TASK_ID': str(self.task_id), 'PARAMS': json.dumps(self.params.model_dump_json())}
if isinstance(self.params, DreamBoothTrainingParams):
self.env_vars['DATA_PATH'] = self.params.image_path
else:
self.env_vars['DATA_PATH'] = self.params.data_path
if not isinstance(self.params, GenericParams):
self.env_vars['MODEL'] = self.params.model
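# Illustrative note (not part of the original source): the env_vars assembled in
# __post_init__ (HF_TOKEN, AUTOTRAIN_USERNAME, PROJECT_NAME, TASK_ID, PARAMS, DATA_PATH,
# MODEL) mirror the environment variables read by autotrain.app.training_api, which is the
# process the concrete runners ultimately launch.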
# File: autotrain-advanced-main/src/autotrain/backends/endpoints.py
import requests
from autotrain.backends.base import BaseBackend
ENDPOINTS_URL = 'https://api.endpoints.huggingface.cloud/v2/endpoint/'
class EndpointsRunner(BaseBackend):
def create(self):
hardware = self.available_hardware[self.backend]
accelerator = hardware.split('_')[2]
instance_size = hardware.split('_')[3]
region = hardware.split('_')[1]
vendor = hardware.split('_')[0]
instance_type = hardware.split('_')[4]
payload = {'accountId': self.username, 'compute': {'accelerator': accelerator, 'instanceSize': instance_size, 'instanceType': instance_type, 'scaling': {'maxReplica': 1, 'minReplica': 1}}, 'model': {'framework': 'custom', 'image': {'custom': {'env': {'HF_TOKEN': self.params.token, 'AUTOTRAIN_USERNAME': self.username, 'PROJECT_NAME': self.params.project_name, 'PARAMS': self.params.model_dump_json(), 'DATA_PATH': self.params.data_path, 'TASK_ID': str(self.task_id), 'MODEL': self.params.model, 'ENDPOINT_ID': f'{self.username}/{self.params.project_name}'}, 'health_route': '/', 'port': 7860, 'url': 'public.ecr.aws/z4c3o6n6/autotrain-api:latest'}}, 'repository': 'autotrain-projects/autotrain-advanced', 'revision': 'main', 'task': 'custom'}, 'name': self.params.project_name, 'provider': {'region': region, 'vendor': vendor}, 'type': 'protected'}
headers = {'Authorization': f'Bearer {self.params.token}'}
r = requests.post(ENDPOINTS_URL + self.username, json=payload, headers=headers, timeout=120)
return r.json()['name']
# File: autotrain-advanced-main/src/autotrain/backends/local.py
from autotrain import logger
from autotrain.backends.base import BaseBackend
from autotrain.utils import run_training
class LocalRunner(BaseBackend):
def create(self):
logger.info('Starting local training...')
params = self.env_vars['PARAMS']
task_id = int(self.env_vars['TASK_ID'])
training_pid = run_training(params, task_id, local=True, wait=self.wait)
if not self.wait:
logger.info(f'Training PID: {training_pid}')
return training_pid
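# Illustrative usage sketch (not part of the original source; field values are placeholders):
#   from autotrain.trainers.text_classification.params import TextClassificationParams
#   params = TextClassificationParams(model='bert-base-uncased', data_path='data/', project_name='my-project')
#   runner = LocalRunner(params=params, backend='local')
#   pid = runner.create()
# For the 'local' and 'local-cli' backends BaseBackend sets wait=True, so run_training is
# called with wait=True; for 'local-ui' wait stays False and the PID is logged before returning.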
# File: autotrain-advanced-main/src/autotrain/backends/ngc.py
import base64
import json
import os
import requests
from requests.exceptions import HTTPError
from autotrain import logger
from autotrain.backends.base import BaseBackend
NGC_API = os.environ.get('NGC_API', 'https://api.ngc.nvidia.com/v2/org')
NGC_AUTH = os.environ.get('NGC_AUTH', 'https://authn.nvidia.com')
NGC_ACE = os.environ.get('NGC_ACE')
NGC_ORG = os.environ.get('NGC_ORG')
NGC_API_KEY = os.environ.get('NGC_CLI_API_KEY')
NGC_TEAM = os.environ.get('NGC_TEAM')
class NGCRunner(BaseBackend):
def _user_authentication_ngc(self):
logger.info('Authenticating NGC user...')
scope = 'group/ngc'
querystring = {'service': 'ngc', 'scope': scope}
auth = f'$oauthtoken:{NGC_API_KEY}'
headers = {'Authorization': f"Basic {base64.b64encode(auth.encode('utf-8')).decode('utf-8')}", 'Content-Type': 'application/json', 'Cache-Control': 'no-cache'}
try:
response = requests.get(NGC_AUTH + '/token', headers=headers, params=querystring, timeout=30)
except HTTPError as http_err:
logger.error(f'HTTP error occurred: {http_err}')
raise Exception("HTTP Error %d: from '%s'" % (response.status_code, NGC_AUTH))
except (requests.Timeout, ConnectionError) as err:
logger.error(f'Failed to request NGC token - {repr(err)}')
raise Exception('%s is unreachable, please try again later.' % NGC_AUTH)
return json.loads(response.text.encode('utf8'))['token']
def _create_ngc_job(self, token, url, payload):
logger.info('Creating NGC Job')
headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {token}'}
try:
response = requests.post(NGC_API + url + '/jobs', headers=headers, json=payload, timeout=30)
result = response.json()
logger.info(f"NGC Job ID: {result.get('job', {}).get('id')}, Job Status History: {result.get('jobStatusHistory')}")
return result.get('job', {}).get('id')
except HTTPError as http_err:
logger.error(f'HTTP error occurred: {http_err}')
raise Exception(f'HTTP Error {response.status_code}: {http_err}')
except (requests.Timeout, ConnectionError) as err:
logger.error(f'Failed to create NGC job - {repr(err)}')
raise Exception(f'Unreachable, please try again later: {err}')
def create(self):
job_name = f'{self.username}-{self.params.project_name}'
ngc_url = f'/{NGC_ORG}/team/{NGC_TEAM}'
ngc_cmd = 'set -x; conda run --no-capture-output -p /app/env autotrain api --port 7860 --host 0.0.0.0'
ngc_payload = {'name': job_name, 'aceName': NGC_ACE, 'aceInstance': self.available_hardware[self.backend], 'dockerImageName': f'{NGC_ORG}/autotrain-advanced:latest', 'command': ngc_cmd, 'envs': [{'name': key, 'value': value} for (key, value) in self.env_vars.items()], 'jobOrder': 50, 'jobPriority': 'NORMAL', 'portMappings': [{'containerPort': 7860, 'protocol': 'HTTPS'}], 'resultContainerMountPoint': '/results', 'runPolicy': {'preemptClass': 'RUNONCE', 'totalRuntimeSeconds': 259200}}
ngc_token = self._user_authentication_ngc()
job_id = self._create_ngc_job(ngc_token, ngc_url, ngc_payload)
return job_id
# File: autotrain-advanced-main/src/autotrain/backends/nvcf.py
import os
import threading
import time
from types import SimpleNamespace
import requests
from autotrain import logger
from autotrain.backends.base import BaseBackend
NVCF_API = 'https://huggingface.co/api/integrations/dgx/v1'
class NVCFRunner(BaseBackend):
def _convert_dict_to_object(self, dictionary):
if isinstance(dictionary, dict):
for (key, value) in dictionary.items():
dictionary[key] = self._convert_dict_to_object(value)
return SimpleNamespace(**dictionary)
elif isinstance(dictionary, list):
return [self._convert_dict_to_object(item) for item in dictionary]
else:
return dictionary
def _conf_nvcf(self, token, nvcf_type, url, job_name, method='POST', payload=None):
logger.info(f'{job_name}: {method} - Configuring NVCF {nvcf_type}.')
headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {token}'}
try:
if method.upper() == 'POST':
response = requests.post(url, headers=headers, json=payload, timeout=30)
else:
raise ValueError(f'Unsupported HTTP method: {method}')
response.raise_for_status()
if response.status_code == 202:
logger.info(f'{job_name}: {method} - Successfully submitted NVCF job. Polling reqId for completion')
response_data = response.json()
nvcf_reqid = response_data.get('nvcfRequestId')
if nvcf_reqid:
logger.info(f'{job_name}: nvcfRequestId: {nvcf_reqid}')
return nvcf_reqid
logger.warning(f'{job_name}: nvcfRequestId key is missing in the response body')
return None
result = response.json()
result_obj = self._convert_dict_to_object(result)
logger.info(f'{job_name}: {method} - Successfully processed NVCF {nvcf_type}.')
return result_obj
except requests.HTTPError as http_err:
error_message = http_err.response.text if http_err.response else 'No additional error information.'
logger.error(f'{job_name}: HTTP error occurred processing NVCF {nvcf_type} with {method} request: {http_err}. Error details: {error_message}')
raise Exception(f'HTTP Error {http_err.response.status_code}: {http_err}. Details: {error_message}')
except (requests.Timeout, ConnectionError) as err:
logger.error(f'{job_name}: Failed to process NVCF {nvcf_type} with {method} request - {repr(err)}')
raise Exception(f'Unreachable, please try again later: {err}')
def _poll_nvcf(self, url, token, job_name, method='get', timeout=86400, interval=30, op='poll'):
timeout = float(timeout)
interval = float(interval)
start_time = time.time()
success = False
last_full_log = ''
while time.time() - start_time < timeout:
try:
headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {token}'}
if method.upper() == 'GET':
response = requests.get(url, headers=headers)
else:
raise ValueError(f'Unsupported HTTP method: {method}')
if response.status_code == 404 and success:
break
response.raise_for_status()
try:
data = response.json()
except ValueError:
logger.error('Failed to parse JSON from response')
continue
if response.status_code == 500:
logger.error('Training failed')
if 'detail' in data:
detail_message = data['detail']
for line in detail_message.split('\n'):
if line.strip():
print(line)
break
if response.status_code in [200, 202]:
logger.info(f"{job_name}: {method} - {response.status_code} - {('Polling completed' if response.status_code == 200 else 'Polling reqId for completion')}")
if 'log' in data:
current_full_log = data['log']
if current_full_log != last_full_log:
new_log_content = current_full_log[len(last_full_log):]
for line in new_log_content.split('\n'):
if line.strip():
print(line)
last_full_log = current_full_log
if response.status_code == 200:
success = True
except requests.HTTPError as http_err:
if not (http_err.response.status_code == 404 and success):
logger.error(f'HTTP error occurred: {http_err}')
except (requests.ConnectionError, ValueError) as err:
logger.error(f'Error while handling request: {err}')
time.sleep(interval)
if not success:
raise TimeoutError(f"Operation '{op}' did not complete successfully within the timeout period.")
def create(self):
hf_token = self.env_vars['HF_TOKEN']
job_name = f'{self.username}-{self.params.project_name}'
logger.info('Starting NVCF training')
logger.info(f'job_name: {job_name}')
logger.info(f'backend: {self.backend}')
nvcf_url_submit = f"{NVCF_API}/invoke/{self.available_hardware[self.backend]['id']}"
org_name = os.environ.get('SPACE_ID')
if org_name is None:
raise ValueError('SPACE_ID environment variable is not set')
org_name = org_name.split('/')[0]
nvcf_fr_payload = {'cmd': ['conda', 'run', '--no-capture-output', '-p', '/app/env', 'python', '-u', '-m', 'uvicorn', 'autotrain.app.training_api:api', '--host', '0.0.0.0', '--port', '7860'], 'env': {key: value for (key, value) in self.env_vars.items()}, 'ORG_NAME': org_name}
nvcf_fn_req = self._conf_nvcf(token=hf_token, nvcf_type='job_submit', url=nvcf_url_submit, job_name=job_name, method='POST', payload=nvcf_fr_payload)
nvcf_url_reqpoll = f'{NVCF_API}/status/{nvcf_fn_req}'
logger.info(f'{job_name}: Polling : {nvcf_url_reqpoll}')
poll_thread = threading.Thread(target=self._poll_nvcf, kwargs={'url': nvcf_url_reqpoll, 'token': hf_token, 'job_name': job_name, 'method': 'GET', 'timeout': 172800, 'interval': 20})
poll_thread.start()
return nvcf_fn_req
# File: autotrain-advanced-main/src/autotrain/backends/spaces.py
import io
from huggingface_hub import HfApi
from autotrain.backends.base import BaseBackend
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.generic.params import GenericParams
_DOCKERFILE = '\nFROM huggingface/autotrain-advanced:latest\n\nCMD pip uninstall -y autotrain-advanced && pip install -U autotrain-advanced && autotrain api --port 7860 --host 0.0.0.0\n'
_DOCKERFILE = _DOCKERFILE.replace('\n', ' ').replace('  ', '\n').strip()
class SpaceRunner(BaseBackend):
def _create_readme(self):
_readme = '---\n'
_readme += f'title: {self.params.project_name}\n'
_readme += 'emoji: 🚀\n'
_readme += 'colorFrom: green\n'
_readme += 'colorTo: indigo\n'
_readme += 'sdk: docker\n'
_readme += 'pinned: false\n'
_readme += 'duplicated_from: autotrain-projects/autotrain-advanced\n'
_readme += '---\n'
_readme = io.BytesIO(_readme.encode())
return _readme
def _add_secrets(self, api, space_id):
if isinstance(self.params, GenericParams):
for (k, v) in self.params.env.items():
api.add_space_secret(repo_id=space_id, key=k, value=v)
self.params.env = {}
api.add_space_secret(repo_id=space_id, key='HF_TOKEN', value=self.params.token)
api.add_space_secret(repo_id=space_id, key='AUTOTRAIN_USERNAME', value=self.username)
api.add_space_secret(repo_id=space_id, key='PROJECT_NAME', value=self.params.project_name)
api.add_space_secret(repo_id=space_id, key='TASK_ID', value=str(self.task_id))
api.add_space_secret(repo_id=space_id, key='PARAMS', value=self.params.model_dump_json())
if isinstance(self.params, DreamBoothTrainingParams):
api.add_space_secret(repo_id=space_id, key='DATA_PATH', value=self.params.image_path)
else:
api.add_space_secret(repo_id=space_id, key='DATA_PATH', value=self.params.data_path)
if not isinstance(self.params, GenericParams):
api.add_space_secret(repo_id=space_id, key='MODEL', value=self.params.model)
def create(self):
api = HfApi(token=self.params.token)
space_id = f'{self.username}/autotrain-{self.params.project_name}'
api.create_repo(repo_id=space_id, repo_type='space', space_sdk='docker', space_hardware=self.available_hardware[self.backend], private=True)
self._add_secrets(api, space_id)
readme = self._create_readme()
api.upload_file(path_or_fileobj=readme, path_in_repo='README.md', repo_id=space_id, repo_type='space')
_dockerfile = io.BytesIO(_DOCKERFILE.encode())
api.upload_file(path_or_fileobj=_dockerfile, path_in_repo='Dockerfile', repo_id=space_id, repo_type='space')
return space_id
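# Illustrative usage (the backend key shown is an assumption; it must be one of the
# 'spaces-*' entries in AVAILABLE_HARDWARE):
#   runner = SpaceRunner(params=params, backend='spaces-a10g-large')
#   space_id = runner.create()  # e.g. '<username>/autotrain-<project_name>'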
# File: autotrain-advanced-main/src/autotrain/cli/__init__.py
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseAutoTrainCommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
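# Contract for subclasses: register_subcommand() adds an argparse subparser and wires
# a factory through set_defaults(func=...); the CLI entry point then calls
# args.func(args).run(). The command modules below all follow this pattern.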
# File: autotrain-advanced-main/src/autotrain/cli/autotrain.py
import argparse
from autotrain import __version__, logger
from autotrain.cli.run_api import RunAutoTrainAPICommand
from autotrain.cli.run_app import RunAutoTrainAppCommand
from autotrain.cli.run_dreambooth import RunAutoTrainDreamboothCommand
from autotrain.cli.run_extractive_qa import RunAutoTrainExtractiveQACommand
from autotrain.cli.run_image_classification import RunAutoTrainImageClassificationCommand
from autotrain.cli.run_image_regression import RunAutoTrainImageRegressionCommand
from autotrain.cli.run_llm import RunAutoTrainLLMCommand
from autotrain.cli.run_object_detection import RunAutoTrainObjectDetectionCommand
from autotrain.cli.run_sent_tranformers import RunAutoTrainSentenceTransformersCommand
from autotrain.cli.run_seq2seq import RunAutoTrainSeq2SeqCommand
from autotrain.cli.run_setup import RunSetupCommand
from autotrain.cli.run_spacerunner import RunAutoTrainSpaceRunnerCommand
from autotrain.cli.run_tabular import RunAutoTrainTabularCommand
from autotrain.cli.run_text_classification import RunAutoTrainTextClassificationCommand
from autotrain.cli.run_text_regression import RunAutoTrainTextRegressionCommand
from autotrain.cli.run_token_classification import RunAutoTrainTokenClassificationCommand
from autotrain.cli.run_tools import RunAutoTrainToolsCommand
from autotrain.parser import AutoTrainConfigParser
def main():
parser = argparse.ArgumentParser('AutoTrain advanced CLI', usage='autotrain <command> [<args>]', epilog='For more information about a command, run: `autotrain <command> --help`')
parser.add_argument('--version', '-v', help='Display AutoTrain version', action='store_true')
parser.add_argument('--config', help='Optional configuration file', type=str)
commands_parser = parser.add_subparsers(help='commands')
RunAutoTrainAppCommand.register_subcommand(commands_parser)
RunAutoTrainLLMCommand.register_subcommand(commands_parser)
RunSetupCommand.register_subcommand(commands_parser)
RunAutoTrainDreamboothCommand.register_subcommand(commands_parser)
RunAutoTrainAPICommand.register_subcommand(commands_parser)
RunAutoTrainTextClassificationCommand.register_subcommand(commands_parser)
RunAutoTrainImageClassificationCommand.register_subcommand(commands_parser)
RunAutoTrainTabularCommand.register_subcommand(commands_parser)
RunAutoTrainSpaceRunnerCommand.register_subcommand(commands_parser)
RunAutoTrainSeq2SeqCommand.register_subcommand(commands_parser)
RunAutoTrainTokenClassificationCommand.register_subcommand(commands_parser)
RunAutoTrainToolsCommand.register_subcommand(commands_parser)
RunAutoTrainTextRegressionCommand.register_subcommand(commands_parser)
RunAutoTrainObjectDetectionCommand.register_subcommand(commands_parser)
RunAutoTrainSentenceTransformersCommand.register_subcommand(commands_parser)
RunAutoTrainImageRegressionCommand.register_subcommand(commands_parser)
RunAutoTrainExtractiveQACommand.register_subcommand(commands_parser)
args = parser.parse_args()
if args.version:
print(__version__)
exit(0)
if args.config:
logger.info(f'Using AutoTrain configuration: {args.config}')
cp = AutoTrainConfigParser(args.config)
cp.run()
exit(0)
if not hasattr(args, 'func'):
parser.print_help()
exit(1)
command = args.func(args)
command.run()
if __name__ == '__main__':
main()
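# Example invocations (illustrative, using the flags registered above):
#   autotrain --version
#   autotrain --config my_config.yml
#   autotrain app --port 7860 --host 0.0.0.0
#   autotrain llm --help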
# File: autotrain-advanced-main/src/autotrain/cli/run_api.py
from argparse import ArgumentParser
from . import BaseAutoTrainCommand
def run_api_command_factory(args):
return RunAutoTrainAPICommand(args.port, args.host, args.task)
class RunAutoTrainAPICommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_api_parser = parser.add_parser('api', description='✨ Run AutoTrain API')
run_api_parser.add_argument('--port', type=int, default=7860, help='Port to run the api on', required=False)
run_api_parser.add_argument('--host', type=str, default='127.0.0.1', help='Host to run the api on', required=False)
run_api_parser.add_argument('--task', type=str, required=False, help='Task to run')
run_api_parser.set_defaults(func=run_api_command_factory)
def __init__(self, port, host, task):
self.port = port
self.host = host
self.task = task
def run(self):
import uvicorn
from autotrain.app.training_api import api
uvicorn.run(api, host=self.host, port=self.port)
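# Example (illustrative): start the bare training API without the web UI.
#   autotrain api --port 7860 --host 0.0.0.0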
# File: autotrain-advanced-main/src/autotrain/cli/run_app.py
import os
import signal
import subprocess
import sys
import threading
from argparse import ArgumentParser
from autotrain import logger
from . import BaseAutoTrainCommand
def handle_output(stream, log_file):
while True:
line = stream.readline()
if not line:
break
sys.stdout.write(line)
sys.stdout.flush()
log_file.write(line)
log_file.flush()
def run_app_command_factory(args):
return RunAutoTrainAppCommand(args.port, args.host, args.share, args.workers, args.colab)
class RunAutoTrainAppCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_app_parser = parser.add_parser('app', description='✨ Run AutoTrain app')
run_app_parser.add_argument('--port', type=int, default=7860, help='Port to run the app on', required=False)
run_app_parser.add_argument('--host', type=str, default='127.0.0.1', help='Host to run the app on', required=False)
run_app_parser.add_argument('--workers', type=int, default=1, help='Number of workers to run the app with', required=False)
run_app_parser.add_argument('--share', action='store_true', help='Share the app on ngrok', required=False)
run_app_parser.add_argument('--colab', action='store_true', help='Use app in colab', required=False)
run_app_parser.set_defaults(func=run_app_command_factory)
def __init__(self, port, host, share, workers, colab):
self.port = port
self.host = host
self.share = share
self.workers = workers
self.colab = colab
def run(self):
if self.colab:
from IPython.display import display
from autotrain.app.colab import colab_app
elements = colab_app()
display(elements)
return
if self.share:
from pyngrok import ngrok
os.system(f'fuser -n tcp -k {self.port}')
authtoken = os.environ.get('NGROK_AUTH_TOKEN', '')
if authtoken.strip() == '':
logger.info('NGROK_AUTH_TOKEN not set')
raise ValueError('NGROK_AUTH_TOKEN not set. Please set it!')
ngrok.set_auth_token(authtoken)
active_tunnels = ngrok.get_tunnels()
for tunnel in active_tunnels:
public_url = tunnel.public_url
ngrok.disconnect(public_url)
url = ngrok.connect(addr=self.port, bind_tls=True)
logger.info(f'AutoTrain Public URL: {url}')
logger.info('Please wait for the app to load...')
command = f'uvicorn autotrain.app.app:app --host {self.host} --port {self.port}'
command += f' --workers {self.workers}'
with open('autotrain.log', 'w', encoding='utf-8') as log_file:
if sys.platform == 'win32':
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, text=True, bufsize=1)
else:
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, text=True, bufsize=1, preexec_fn=os.setsid)
output_thread = threading.Thread(target=handle_output, args=(process.stdout, log_file))
output_thread.start()
try:
process.wait()
output_thread.join()
except KeyboardInterrupt:
logger.warning('Attempting to terminate the process...')
if sys.platform == 'win32':
process.terminate()
else:
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
logger.info('Process terminated by user')
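# Example invocations (illustrative):
#   autotrain app --host 0.0.0.0 --port 7860 --workers 2
#   NGROK_AUTH_TOKEN=<token> autotrain app --share   # tunnel the app through ngrok
# With --colab, the command displays the interface returned by colab_app() instead of
# starting uvicorn.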
# File: autotrain-advanced-main/src/autotrain/cli/run_dreambooth.py
import glob
import os
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli import BaseAutoTrainCommand
from autotrain.cli.utils import common_args, dreambooth_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.dreambooth.utils import VALID_IMAGE_EXTENSIONS, XL_MODELS
def count_images(directory):
files_grabbed = []
for files in VALID_IMAGE_EXTENSIONS:
files_grabbed.extend(glob.glob(os.path.join(directory, '*' + files)))
return len(files_grabbed)
def run_dreambooth_command_factory(args):
return RunAutoTrainDreamboothCommand(args)
class RunAutoTrainDreamboothCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = [{'arg': '--revision', 'help': 'Model revision to use for training', 'required': False, 'type': str}, {'arg': '--tokenizer', 'help': 'Tokenizer to use for training', 'required': False, 'type': str}, {'arg': '--image-path', 'help': 'Path to the images', 'required': True, 'type': str}, {'arg': '--class-image-path', 'help': 'Path to the class images', 'required': False, 'type': str}, {'arg': '--prompt', 'help': 'Instance prompt', 'required': True, 'type': str}, {'arg': '--class-prompt', 'help': 'Class prompt', 'required': False, 'type': str}, {'arg': '--num-class-images', 'help': 'Number of class images', 'required': False, 'default': 100, 'type': int}, {'arg': '--class-labels-conditioning', 'help': 'Class labels conditioning', 'required': False, 'type': str}, {'arg': '--prior-preservation', 'help': 'With prior preservation', 'required': False, 'action': 'store_true'}, {'arg': '--prior-loss-weight', 'help': 'Prior loss weight', 'required': False, 'default': 1.0, 'type': float}, {'arg': '--resolution', 'help': 'Resolution', 'required': True, 'type': int}, {'arg': '--center-crop', 'help': 'Center crop', 'required': False, 'action': 'store_true'}, {'arg': '--train-text-encoder', 'help': 'Train text encoder', 'required': False, 'action': 'store_true'}, {'arg': '--sample-batch-size', 'help': 'Sample batch size', 'required': False, 'default': 4, 'type': int}, {'arg': '--num-steps', 'help': 'Max train steps', 'required': False, 'type': int}, {'arg': '--checkpointing-steps', 'help': 'Checkpointing steps', 'required': False, 'default': 100000, 'type': int}, {'arg': '--resume-from-checkpoint', 'help': 'Resume from checkpoint', 'required': False, 'type': str}, {'arg': '--scale-lr', 'help': 'Scale learning rate', 'required': False, 'action': 'store_true'}, {'arg': '--scheduler', 'help': 'Learning rate scheduler', 'required': False, 'default': 'constant'}, {'arg': '--warmup-steps', 'help': 'Learning rate warmup steps', 'required': False, 'default': 0, 'type': int}, {'arg': '--num-cycles', 'help': 'Learning rate num cycles', 'required': False, 'default': 1, 'type': int}, {'arg': '--lr-power', 'help': 'Learning rate power', 'required': False, 'default': 1.0, 'type': float}, {'arg': '--dataloader-num-workers', 'help': 'Dataloader num workers', 'required': False, 'default': 0, 'type': int}, {'arg': '--use-8bit-adam', 'help': 'Use 8bit adam', 'required': False, 'action': 'store_true'}, {'arg': '--adam-beta1', 'help': 'Adam beta 1', 'required': False, 'default': 0.9, 'type': float}, {'arg': '--adam-beta2', 'help': 'Adam beta 2', 'required': False, 'default': 0.999, 'type': float}, {'arg': '--adam-weight-decay', 'help': 'Adam weight decay', 'required': False, 'default': 0.01, 'type': float}, {'arg': '--adam-epsilon', 'help': 'Adam epsilon', 'required': False, 'default': 1e-08, 'type': float}, {'arg': '--max-grad-norm', 'help': 'Max grad norm', 'required': False, 'default': 1.0, 'type': float}, {'arg': '--allow-tf32', 'help': 'Allow TF32', 'required': False, 'action': 'store_true'}, {'arg': '--prior-generation-precision', 'help': 'Prior generation precision', 'required': False, 'type': str}, {'arg': '--local-rank', 'help': 'Local rank', 'required': False, 'default': -1, 'type': int}, {'arg': '--xformers', 'help': 'Enable xformers memory efficient attention', 'required': False, 'action': 'store_true'}, {'arg': '--pre-compute-text-embeddings', 'help': 'Pre compute text embeddings', 'required': False, 'action': 'store_true'}, {'arg': '--tokenizer-max-length', 'help': 'Tokenizer max length', 'required': 
False, 'type': int}, {'arg': '--text-encoder-use-attention-mask', 'help': 'Text encoder use attention mask', 'required': False, 'action': 'store_true'}, {'arg': '--rank', 'help': 'Rank', 'required': False, 'default': 4, 'type': int}, {'arg': '--xl', 'help': 'XL', 'required': False, 'action': 'store_true'}, {'arg': '--mixed-precision', 'help': 'mixed precision, fp16, bf16, none', 'required': False, 'type': str, 'default': 'none'}, {'arg': '--validation-prompt', 'help': 'Validation prompt', 'required': False, 'type': str}, {'arg': '--num-validation-images', 'help': 'Number of validation images', 'required': False, 'default': 4, 'type': int}, {'arg': '--validation-epochs', 'help': 'Validation epochs', 'required': False, 'default': 50, 'type': int}, {'arg': '--checkpoints-total-limit', 'help': 'Checkpoints total limit', 'required': False, 'type': int}, {'arg': '--validation-images', 'help': 'Validation images', 'required': False, 'type': str}, {'arg': '--logging', 'help': 'Logging using tensorboard', 'required': False, 'action': 'store_true'}]
arg_list = common_args() + arg_list
run_dreambooth_parser = parser.add_parser('dreambooth', description='✨ Run AutoTrain DreamBooth Training')
for arg in arg_list:
if 'action' in arg:
run_dreambooth_parser.add_argument(arg['arg'], help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_dreambooth_parser.add_argument(arg['arg'], help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_dreambooth_parser.set_defaults(func=run_dreambooth_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['center_crop', 'train_text_encoder', 'disable_gradient_checkpointing', 'scale_lr', 'use_8bit_adam', 'allow_tf32', 'xformers', 'pre_compute_text_embeddings', 'text_encoder_use_attention_mask', 'xl', 'push_to_hub', 'logging', 'prior_preservation']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if not os.path.isdir(self.args.image_path):
raise ValueError('❌ Please specify a valid image directory')
num_images = count_images(self.args.image_path)
if num_images == 0:
raise ValueError('❌ No valid images found in the specified image directory')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('❌ Please specify a username to push to hub')
if self.args.model in XL_MODELS:
self.args.xl = True
if self.args.backend.startswith('spaces') or self.args.backend.startswith('ep-'):
if not self.args.push_to_hub:
raise ValueError('Push to hub must be specified for spaces backend')
if self.args.username is None:
raise ValueError('Username must be specified for spaces backend')
if self.args.token is None:
raise ValueError('Token must be specified for spaces backend')
def run(self):
logger.info('Running DreamBooth Training')
params = DreamBoothTrainingParams(**vars(self.args))
params = dreambooth_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
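# Example (illustrative; --model, --project-name, --push-to-hub, --username and
# --token come from common_args(), which is not shown here, so treat those flag
# names as assumptions):
#   autotrain dreambooth --model stabilityai/stable-diffusion-xl-base-1.0 \
#     --project-name my-dreambooth --image-path ./images --prompt "photo of sks dog" \
#     --resolution 1024 --num-steps 500 --push-to-hub --username <hf-user> --token <hf-token>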
# File: autotrain-advanced-main/src/autotrain/cli/run_extractive_qa.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import ext_qa_munge_data, get_field_info
from autotrain.project import AutoTrainProject
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
from . import BaseAutoTrainCommand
def run_extractive_qa_command_factory(args):
return RunAutoTrainExtractiveQACommand(args)
class RunAutoTrainExtractiveQACommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(ExtractiveQuestionAnsweringParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend to use for training', 'required': False, 'default': 'local'}] + arg_list
arg_list = [arg for arg in arg_list if arg['arg'] != '--disable-gradient-checkpointing']
run_extractive_qa_parser = parser.add_parser('extractive-qa', description='✨ Run AutoTrain Extractive Question Answering')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_extractive_qa_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_extractive_qa_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_extractive_qa_parser.set_defaults(func=run_extractive_qa_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
def run(self):
logger.info('Running Extractive Question Answering')
if self.args.train:
params = ExtractiveQuestionAnsweringParams(**vars(self.args))
params = ext_qa_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
# File: autotrain-advanced-main/src/autotrain/cli/run_image_classification.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, img_clf_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.image_classification.params import ImageClassificationParams
from . import BaseAutoTrainCommand
def run_image_classification_command_factory(args):
return RunAutoTrainImageClassificationCommand(args)
class RunAutoTrainImageClassificationCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(ImageClassificationParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
run_image_classification_parser = parser.add_parser('image-classification', description='✨ Run AutoTrain Image Classification')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_image_classification_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_image_classification_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_image_classification_parser.set_defaults(func=run_image_classification_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
if self.args.backend.startswith('spaces') or self.args.backend.startswith('ep-'):
if not self.args.push_to_hub:
raise ValueError('Push to hub must be specified for spaces backend')
if self.args.username is None:
raise ValueError('Username must be specified for spaces backend')
if self.args.token is None:
raise ValueError('Token must be specified for spaces backend')
def run(self):
logger.info('Running Image Classification')
if self.args.train:
params = ImageClassificationParams(**vars(self.args))
params = img_clf_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
# File: autotrain-advanced-main/src/autotrain/cli/run_image_regression.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, img_reg_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.image_regression.params import ImageRegressionParams
from . import BaseAutoTrainCommand
def run_image_regression_command_factory(args):
return RunAutoTrainImageRegressionCommand(args)
class RunAutoTrainImageRegressionCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(ImageRegressionParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
run_image_regression_parser = parser.add_parser('image-regression', description='✨ Run AutoTrain Image Regression')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_image_regression_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_image_regression_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_image_regression_parser.set_defaults(func=run_image_regression_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
if self.args.backend.startswith('spaces') or self.args.backend.startswith('ep-'):
if not self.args.push_to_hub:
raise ValueError('Push to hub must be specified for spaces backend')
if self.args.username is None:
raise ValueError('Username must be specified for spaces backend')
if self.args.token is None:
raise ValueError('Token must be specified for spaces backend')
def run(self):
logger.info('Running Image Regression')
if self.args.train:
params = ImageRegressionParams(**vars(self.args))
params = img_reg_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
# File: autotrain-advanced-main/src/autotrain/cli/run_llm.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, llm_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.clm.params import LLMTrainingParams
from . import BaseAutoTrainCommand
def run_llm_command_factory(args):
return RunAutoTrainLLMCommand(args)
class RunAutoTrainLLMCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(LLMTrainingParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
arg_list = [arg for arg in arg_list if arg['arg'] != '--block-size']
arg_list.append({'arg': '--block_size', 'help': 'Block size', 'required': False, 'type': str, 'default': '1024', 'alias': ['--block-size']})
run_llm_parser = parser.add_parser('llm', description='✨ Run AutoTrain LLM')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_llm_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_llm_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_llm_parser.set_defaults(func=run_llm_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'add_eos_token', 'peft', 'auto_find_batch_size', 'push_to_hub', 'merge_adapter', 'use_flash_attention_2', 'disable_gradient_checkpointing']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
block_size_split = self.args.block_size.strip().split(',')
if len(block_size_split) == 1:
self.args.block_size = int(block_size_split[0])
elif len(block_size_split) > 1:
self.args.block_size = [int(x.strip()) for x in block_size_split]
else:
raise ValueError('Invalid block size')
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
if self.args.token is None:
raise ValueError('Token must be specified for push to hub')
if self.args.backend.startswith('spaces') or self.args.backend.startswith('ep-'):
if not self.args.push_to_hub:
raise ValueError('Push to hub must be specified for spaces backend')
if self.args.username is None:
raise ValueError('Username must be specified for spaces backend')
if self.args.token is None:
raise ValueError('Token must be specified for spaces backend')
if self.args.deploy:
raise NotImplementedError('Deploy is not implemented yet')
if self.args.inference:
raise NotImplementedError('Inference is not implemented yet')
def run(self):
logger.info('Running LLM')
if self.args.train:
params = LLMTrainingParams(**vars(self.args))
params = llm_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
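# Example (illustrative; most flags are generated from LLMTrainingParams via
# get_field_info(), so their exact names follow that params class). --block_size
# accepts either a single value or a comma-separated list (e.g. 512,1024), which the
# constructor above parses into an int or a list of ints.
#   autotrain llm --train --model <base-model> --data-path ./data \
#     --project-name my-llm --block_size 1024 --peft \
#     --push-to-hub --username <hf-user> --token <hf-token>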
# File: autotrain-advanced-main/src/autotrain/cli/run_object_detection.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, img_obj_detect_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.object_detection.params import ObjectDetectionParams
from . import BaseAutoTrainCommand
def run_object_detection_command_factory(args):
return RunAutoTrainObjectDetectionCommand(args)
class RunAutoTrainObjectDetectionCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(ObjectDetectionParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
run_object_detection_parser = parser.add_parser('object-detection', description='✨ Run AutoTrain Object Detection')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_object_detection_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_object_detection_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_object_detection_parser.set_defaults(func=run_object_detection_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
if self.args.backend.startswith('spaces') or self.args.backend.startswith('ep-'):
if not self.args.push_to_hub:
raise ValueError('Push to hub must be specified for spaces backend')
if self.args.username is None:
raise ValueError('Username must be specified for spaces backend')
if self.args.token is None:
raise ValueError('Token must be specified for spaces backend')
def run(self):
logger.info('Running Object Detection')
if self.args.train:
params = ObjectDetectionParams(**vars(self.args))
params = img_obj_detect_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
# File: autotrain-advanced-main/src/autotrain/cli/run_sent_tranformers.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, sent_transformers_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from . import BaseAutoTrainCommand
def run_sentence_transformers_command_factory(args):
return RunAutoTrainSentenceTransformersCommand(args)
class RunAutoTrainSentenceTransformersCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(SentenceTransformersParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
run_sentence_transformers_parser = parser.add_parser('sentence-transformers', description='✨ Run AutoTrain Sentence Transformers')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_sentence_transformers_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_sentence_transformers_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_sentence_transformers_parser.set_defaults(func=run_sentence_transformers_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
if self.args.backend.startswith('spaces') or self.args.backend.startswith('ep-'):
if not self.args.push_to_hub:
raise ValueError('Push to hub must be specified for spaces backend')
if self.args.username is None:
raise ValueError('Username must be specified for spaces backend')
if self.args.token is None:
raise ValueError('Token must be specified for spaces backend')
def run(self):
logger.info('Running Sentence Transformers...')
if self.args.train:
params = SentenceTransformersParams(**vars(self.args))
params = sent_transformers_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
# File: autotrain-advanced-main/src/autotrain/cli/run_seq2seq.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, seq2seq_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from . import BaseAutoTrainCommand
def run_seq2seq_command_factory(args):
return RunAutoTrainSeq2SeqCommand(args)
class RunAutoTrainSeq2SeqCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(Seq2SeqParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
run_seq2seq_parser = parser.add_parser('seq2seq', description='✨ Run AutoTrain Seq2Seq')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_seq2seq_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_seq2seq_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_seq2seq_parser.set_defaults(func=run_seq2seq_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub', 'peft']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
def run(self):
logger.info('Running Seq2Seq Training')
if self.args.train:
params = Seq2SeqParams(**vars(self.args))
params = seq2seq_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
# File: autotrain-advanced-main/src/autotrain/cli/run_spacerunner.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.backends.base import AVAILABLE_HARDWARE
from autotrain.backends.spaces import SpaceRunner
from autotrain.trainers.generic.params import GenericParams
from autotrain.trainers.generic.utils import create_dataset_repo
from . import BaseAutoTrainCommand
BACKEND_CHOICES = list(AVAILABLE_HARDWARE.keys())
BACKEND_CHOICES = [b for b in BACKEND_CHOICES if b.startswith('spaces-')]
def run_spacerunner_command_factory(args):
return RunAutoTrainSpaceRunnerCommand(args)
class RunAutoTrainSpaceRunnerCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = [{'arg': '--project-name', 'help': 'Name of the project. Must be unique.', 'required': True, 'type': str}, {'arg': '--script-path', 'help': 'Path to the script', 'required': True, 'type': str}, {'arg': '--username', 'help': 'Hugging Face Username, can also be an organization name', 'required': True, 'type': str}, {'arg': '--token', 'help': 'Hugging Face API Token', 'required': True, 'type': str}, {'arg': '--backend', 'help': 'Hugging Face backend to use', 'required': True, 'type': str, 'choices': BACKEND_CHOICES}, {'arg': '--env', 'help': 'Environment variables, e.g. --env FOO=bar;FOO2=bar2;FOO3=bar3', 'required': False, 'type': str}, {'arg': '--args', 'help': 'Arguments to pass to the script, e.g. --args foo=bar;foo2=bar2;foo3=bar3;store_true_arg', 'required': False, 'type': str}]
run_spacerunner_parser = parser.add_parser('spacerunner', description='✨ Run AutoTrain SpaceRunner')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_spacerunner_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'), choices=arg.get('choices'))
else:
run_spacerunner_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_spacerunner_parser.set_defaults(func=run_spacerunner_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = []
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
env_vars = {}
if self.args.env:
for env_name_value in self.args.env.split(';'):
if len(env_name_value.split('=')) == 2:
env_vars[env_name_value.split('=')[0]] = env_name_value.split('=')[1]
else:
raise ValueError('Invalid environment variable format.')
self.args.env = env_vars
app_args = {}
store_true_args = []
if self.args.args:
for arg_name_value in self.args.args.split(';'):
if len(arg_name_value.split('=')) == 1:
store_true_args.append(arg_name_value)
elif len(arg_name_value.split('=')) == 2:
app_args[arg_name_value.split('=')[0]] = arg_name_value.split('=')[1]
else:
raise ValueError('Invalid argument format.')
for arg_name in store_true_args:
app_args[arg_name] = ''
self.args.args = app_args
def run(self):
dataset_id = create_dataset_repo(username=self.args.username, project_name=self.args.project_name, script_path=self.args.script_path, token=self.args.token)
params = GenericParams(project_name=self.args.project_name, data_path=dataset_id, username=self.args.username, token=self.args.token, script_path=self.args.script_path, env=self.args.env, args=self.args.args)
project = SpaceRunner(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
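# Example (illustrative; the backend key is an assumption and must be one of the
# 'spaces-*' choices). Key=value pairs in --args become script arguments, while bare
# names (e.g. push_to_hub) are treated as store_true flags, per __init__ above.
#   autotrain spacerunner --project-name my-script-run --script-path ./my_script \
#     --username <hf-user> --token <hf-token> --backend spaces-a10g-large \
#     --env "WANDB_DISABLED=true;HF_HUB_ENABLE_HF_TRANSFER=1" \
#     --args "epochs=3;batch_size=8;push_to_hub"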
# File: autotrain-advanced-main/src/autotrain/cli/run_tabular.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, tabular_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.tabular.params import TabularParams
from . import BaseAutoTrainCommand
def run_tabular_command_factory(args):
return RunAutoTrainTabularCommand(args)
class RunAutoTrainTabularCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(TabularParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
remove_args = ['--disable-gradient-checkpointing', '--gradient-accumulation', '--epochs', '--log', '--lr']
arg_list = [arg for arg in arg_list if arg['arg'] not in remove_args]
run_tabular_parser = parser.add_parser('tabular', description='✨ Run AutoTrain Tabular Data Training')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_tabular_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_tabular_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_tabular_parser.set_defaults(func=run_tabular_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
self.args.target_columns = [k.strip() for k in self.args.target_columns.split(',')]
def run(self):
logger.info('Running Tabular Training')
if self.args.train:
params = TabularParams(**vars(self.args))
params = tabular_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
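# Example (illustrative; flag names are generated from TabularParams, so treat them
# as assumptions). --target-columns takes a comma-separated list, split in __init__.
#   autotrain tabular --train --model xgboost --data-path ./data \
#     --project-name my-tabular --target-columns income --push-to-hub --username <hf-user>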
# File: autotrain-advanced-main/src/autotrain/cli/run_text_classification.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, text_clf_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.text_classification.params import TextClassificationParams
from . import BaseAutoTrainCommand
def run_text_classification_command_factory(args):
return RunAutoTrainTextClassificationCommand(args)
class RunAutoTrainTextClassificationCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(TextClassificationParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
arg_list = [arg for arg in arg_list if arg['arg'] != '--disable-gradient-checkpointing']
run_text_classification_parser = parser.add_parser('text-classification', description='✨ Run AutoTrain Text Classification')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_text_classification_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_text_classification_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_text_classification_parser.set_defaults(func=run_text_classification_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
def run(self):
logger.info('Running Text Classification')
if self.args.train:
params = TextClassificationParams(**vars(self.args))
params = text_clf_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
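# Example (illustrative; column flags are generated from TextClassificationParams and
# are assumptions here):
#   autotrain text-classification --train --model bert-base-uncased --data-path ./data \
#     --project-name my-clf --text-column text --target-column label \
#     --push-to-hub --username <hf-user> --token <hf-token>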
# File: autotrain-advanced-main/src/autotrain/cli/run_text_regression.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, text_reg_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.text_regression.params import TextRegressionParams
from . import BaseAutoTrainCommand
def run_text_regression_command_factory(args):
return RunAutoTrainTextRegressionCommand(args)
class RunAutoTrainTextRegressionCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(TextRegressionParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
arg_list = [arg for arg in arg_list if arg['arg'] != '--disable-gradient-checkpointing']
run_text_regression_parser = parser.add_parser('text-regression', description='✨ Run AutoTrain Text Regression')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_text_regression_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_text_regression_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_text_regression_parser.set_defaults(func=run_text_regression_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
def run(self):
logger.info('Running Text Regression')
if self.args.train:
params = TextRegressionParams(**vars(self.args))
params = text_reg_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
# File: autotrain-advanced-main/src/autotrain/cli/run_token_classification.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, token_clf_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.token_classification.params import TokenClassificationParams
from . import BaseAutoTrainCommand
def run_token_classification_command_factory(args):
return RunAutoTrainTokenClassificationCommand(args)
class RunAutoTrainTokenClassificationCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(TokenClassificationParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
arg_list = [arg for arg in arg_list if arg['arg'] != '--disable-gradient-checkpointing']
run_token_classification_parser = parser.add_parser('token-classification', description='✨ Run AutoTrain Token Classification')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_token_classification_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_token_classification_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_token_classification_parser.set_defaults(func=run_token_classification_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
def run(self):
logger.info('Running Token Classification')
if self.args.train:
params = TokenClassificationParams(**vars(self.args))
params = token_clf_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
# File: autotrain-advanced-main/src/autotrain/cli/run_tools.py
from argparse import ArgumentParser
from . import BaseAutoTrainCommand
def run_tools_command_factory(args):
return RunAutoTrainToolsCommand(args)
class RunAutoTrainToolsCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_tools_parser = parser.add_parser('tools', help='Run AutoTrain tools')
subparsers = run_tools_parser.add_subparsers(title='tools', dest='tool_name')
merge_llm_parser = subparsers.add_parser('merge-llm-adapter', help='Merge LLM Adapter tool')
merge_llm_parser.add_argument('--base-model-path', type=str, help='Base model path')
merge_llm_parser.add_argument('--adapter-path', type=str, help='Adapter path')
merge_llm_parser.add_argument('--token', type=str, help='Token', default=None, required=False)
merge_llm_parser.add_argument('--pad-to-multiple-of', type=int, help='Pad to multiple of', default=None, required=False)
merge_llm_parser.add_argument('--output-folder', type=str, help='Output folder', required=False, default=None)
merge_llm_parser.add_argument('--push-to-hub', action='store_true', help='Push to Hugging Face Hub', required=False)
merge_llm_parser.set_defaults(func=run_tools_command_factory, merge_llm_adapter=True)
convert_to_kohya_parser = subparsers.add_parser('convert_to_kohya', help='Convert to Kohya tool')
convert_to_kohya_parser.add_argument('--input-path', type=str, help='Input path')
convert_to_kohya_parser.add_argument('--output-path', type=str, help='Output path')
convert_to_kohya_parser.set_defaults(func=run_tools_command_factory, convert_to_kohya=True)
def __init__(self, args):
self.args = args
def run(self):
if getattr(self.args, 'merge_llm_adapter', False):
self.run_merge_llm_adapter()
if getattr(self.args, 'convert_to_kohya', False):
self.run_convert_to_kohya()
def run_merge_llm_adapter(self):
from autotrain.tools.merge_adapter import merge_llm_adapter
merge_llm_adapter(base_model_path=self.args.base_model_path, adapter_path=self.args.adapter_path, token=self.args.token, output_folder=self.args.output_folder, pad_to_multiple_of=self.args.pad_to_multiple_of, push_to_hub=self.args.push_to_hub)
def run_convert_to_kohya(self):
from autotrain.tools.convert_to_kohya import convert_to_kohya
convert_to_kohya(input_path=self.args.input_path, output_path=self.args.output_path)
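# Illustrative usage (not part of the original source): both tools are exposed through the
# `autotrain tools` subcommand registered above. All paths and repo ids below are hypothetical.
#
#   autotrain tools merge-llm-adapter \
#       --base-model-path meta-llama/Llama-2-7b-hf \
#       --adapter-path my-user/my-lora-adapter \
#       --output-folder merged-model
#
#   autotrain tools convert_to_kohya --input-path lora.safetensors --output-path lora_kohya.safetensors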
# File: autotrain-advanced-main/src/autotrain/cli/run_vlm.py
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli.utils import get_field_info, vlm_munge_data
from autotrain.project import AutoTrainProject
from autotrain.trainers.vlm.params import VLMTrainingParams
from . import BaseAutoTrainCommand
def run_vlm_command_factory(args):
return RunAutoTrainVLMCommand(args)
class RunAutoTrainVLMCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = get_field_info(VLMTrainingParams)
arg_list = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--backend', 'help': 'Backend', 'required': False, 'type': str, 'default': 'local'}] + arg_list
run_vlm_parser = parser.add_parser('vlm', description='✨ Run AutoTrain VLM')
for arg in arg_list:
names = [arg['arg']] + arg.get('alias', [])
if 'action' in arg:
run_vlm_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), action=arg.get('action'), default=arg.get('default'))
else:
run_vlm_parser.add_argument(*names, dest=arg['arg'].replace('--', '').replace('-', '_'), help=arg['help'], required=arg.get('required', False), type=arg.get('type'), default=arg.get('default'), choices=arg.get('choices'))
run_vlm_parser.set_defaults(func=run_vlm_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = ['train', 'deploy', 'inference', 'auto_find_batch_size', 'push_to_hub']
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError('Project name must be specified')
if self.args.data_path is None:
raise ValueError('Data path must be specified')
if self.args.model is None:
raise ValueError('Model must be specified')
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError('Username must be specified for push to hub')
else:
raise ValueError('Must specify --train, --deploy or --inference')
if self.args.backend.startswith('spaces') or self.args.backend.startswith('ep-'):
if not self.args.push_to_hub:
raise ValueError('Push to hub must be specified for spaces backend')
if self.args.username is None:
raise ValueError('Username must be specified for spaces backend')
if self.args.token is None:
raise ValueError('Token must be specified for spaces backend')
def run(self):
logger.info('Running VLM')
if self.args.train:
params = VLMTrainingParams(**vars(self.args))
params = vlm_munge_data(params, local=self.args.backend.startswith('local'))
project = AutoTrainProject(params=params, backend=self.args.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
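# Illustrative usage (not part of the original source): the `vlm` subcommand registered above,
# with hypothetical model, project and path values.
#
#   autotrain vlm --train \
#       --model google/paligemma-3b-pt-224 \
#       --project-name my-vlm-project \
#       --data-path data/ \
#       --backend local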
# File: autotrain-advanced-main/src/autotrain/cli/utils.py
import os
from typing import Any, Type
from autotrain.backends.base import AVAILABLE_HARDWARE
from autotrain.dataset import AutoTrainDataset, AutoTrainDreamboothDataset, AutoTrainImageClassificationDataset, AutoTrainImageRegressionDataset, AutoTrainObjectDetectionDataset, AutoTrainVLMDataset
def common_args():
args = [{'arg': '--train', 'help': 'Command to train the model', 'required': False, 'action': 'store_true'}, {'arg': '--deploy', 'help': 'Command to deploy the model (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--inference', 'help': 'Command to run inference (limited availability)', 'required': False, 'action': 'store_true'}, {'arg': '--username', 'help': 'Hugging Face Hub Username', 'required': False, 'type': str}, {'arg': '--backend', 'help': 'Backend to use: default or spaces. Spaces backend requires push_to_hub & username. Advanced users only.', 'required': False, 'type': str, 'default': 'local', 'choices': AVAILABLE_HARDWARE.keys()}, {'arg': '--token', 'help': 'Your Hugging Face API token. Token must have write access to the model hub.', 'required': False, 'type': str}, {'arg': '--push-to-hub', 'help': 'Push to hub after training will push the trained model to the Hugging Face model hub.', 'required': False, 'action': 'store_true'}, {'arg': '--model', 'help': 'Base model to use for training', 'required': True, 'type': str}, {'arg': '--project-name', 'help': 'Output directory / repo id for trained model (must be unique on hub)', 'required': True, 'type': str}, {'arg': '--data-path', 'help': 'Train dataset to use. When using cli, this should be a directory path containing training and validation data in appropriate formats', 'required': False, 'type': str}, {'arg': '--train-split', 'help': 'Train dataset split to use', 'required': False, 'type': str, 'default': 'train'}, {'arg': '--valid-split', 'help': 'Validation dataset split to use', 'required': False, 'type': str, 'default': None}, {'arg': '--batch-size', 'help': 'Training batch size to use', 'required': False, 'type': int, 'default': 2, 'alias': ['--train-batch-size']}, {'arg': '--seed', 'help': 'Random seed for reproducibility', 'required': False, 'default': 42, 'type': int}, {'arg': '--epochs', 'help': 'Number of training epochs', 'required': False, 'default': 1, 'type': int}, {'arg': '--gradient-accumulation', 'help': 'Gradient accumulation steps', 'required': False, 'default': 1, 'type': int, 'alias': ['--gradient-accumulation']}, {'arg': '--disable-gradient-checkpointing', 'help': 'Disable gradient checkpointing', 'required': False, 'action': 'store_true', 'alias': ['--disable-gradient-checkpointing', '--disable-gc']}, {'arg': '--lr', 'help': 'Learning rate', 'required': False, 'default': 0.0005, 'type': float}, {'arg': '--log', 'help': 'Use experiment tracking', 'required': False, 'type': str, 'default': 'none', 'choices': ['none', 'wandb', 'tensorboard']}]
return args
def python_type_from_schema_field(field_data: dict) -> Type:
type_map = {'string': str, 'number': float, 'integer': int, 'boolean': bool}
field_type = field_data.get('type')
if field_type:
return type_map.get(field_type, str)
elif 'anyOf' in field_data:
for type_option in field_data['anyOf']:
if type_option['type'] != 'null':
return type_map.get(type_option['type'], str)
return str
def get_default_value(field_data: dict) -> Any:
return field_data['default']
def get_field_info(params_class):
schema = params_class.model_json_schema()
properties = schema.get('properties', {})
field_info = []
for (field_name, field_data) in properties.items():
temp_info = {'arg': f"--{field_name.replace('_', '-')}", 'alias': [f'--{field_name}', f"--{field_name.replace('_', '-')}"], 'type': python_type_from_schema_field(field_data), 'help': field_data.get('title', ''), 'default': get_default_value(field_data)}
if temp_info['type'] == bool:
temp_info['action'] = 'store_true'
field_info.append(temp_info)
return field_info
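# Illustrative sketch (not part of the original source): get_field_info turns a pydantic params
# class into argparse-ready dicts. The batch_size field is assumed to exist on VLMTrainingParams;
# the printed entry is an example of the shape produced by the code above, not captured output.
#
#   from autotrain.trainers.vlm.params import VLMTrainingParams
#   for info in get_field_info(VLMTrainingParams):
#       print(info['arg'], info['type'], info.get('action', ''))
#   # e.g. {'arg': '--batch-size', 'alias': ['--batch_size', '--batch-size'], 'type': int,
#   #       'help': 'Batch Size', 'default': ..., 'action': ...}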
def tabular_munge_data(params, local):
if isinstance(params.target_columns, str):
col_map_label = [params.target_columns]
else:
col_map_label = params.target_columns
task = params.task
if task == 'classification' and len(col_map_label) > 1:
task = 'tabular_multi_label_classification'
elif task == 'classification' and len(col_map_label) == 1:
task = 'tabular_multi_class_classification'
elif task == 'regression' and len(col_map_label) > 1:
task = 'tabular_multi_column_regression'
elif task == 'regression' and len(col_map_label) == 1:
task = 'tabular_single_column_regression'
else:
raise Exception('Please select a valid task.')
exts = ['csv', 'jsonl']
ext_to_use = None
for ext in exts:
path = f'{params.data_path}/{params.train_split}.{ext}'
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f'{params.data_path}/{params.train_split}.{ext_to_use}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}.{ext_to_use}'
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(train_data=[train_data_path], task=task, token=params.token, project_name=params.project_name, username=params.username, column_mapping={'id': params.id_column, 'label': col_map_label}, valid_data=[valid_data_path] if valid_data_path is not None else None, percent_valid=None, local=local, ext=ext_to_use)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.id_column = 'autotrain_id'
if len(col_map_label) == 1:
params.target_columns = ['autotrain_label']
else:
params.target_columns = [f'autotrain_label_{i}' for i in range(len(col_map_label))]
return params
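# Note (added comment): tabular_munge_data above and the other CSV/JSONL munge helpers below all
# expect a local layout of {data_path}/{train_split}.csv or .jsonl, plus an optional
# {data_path}/{valid_split} file, e.g. data/train.csv and data/valid.csv. When the train file
# exists, the data is run through AutoTrainDataset.prepare() and the column attributes on the
# params object are rewritten to the autotrain_* defaults.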
def llm_munge_data(params, local):
exts = ['csv', 'jsonl']
ext_to_use = None
for ext in exts:
path = f'{params.data_path}/{params.train_split}.{ext}'
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f'{params.data_path}/{params.train_split}.{ext_to_use}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}.{ext_to_use}'
else:
valid_data_path = None
if os.path.exists(train_data_path):
col_map = {'text': params.text_column}
if params.rejected_text_column is not None:
col_map['rejected_text'] = params.rejected_text_column
if params.prompt_text_column is not None:
col_map['prompt'] = params.prompt_text_column
dset = AutoTrainDataset(train_data=[train_data_path], task='lm_training', token=params.token, project_name=params.project_name, username=params.username, column_mapping=col_map, valid_data=[valid_data_path] if valid_data_path is not None else None, percent_valid=None, local=local, ext=ext_to_use)
params.data_path = dset.prepare()
params.valid_split = None
params.text_column = 'autotrain_text'
params.rejected_text_column = 'autotrain_rejected_text'
params.prompt_text_column = 'autotrain_prompt'
return params
def seq2seq_munge_data(params, local):
exts = ['csv', 'jsonl']
ext_to_use = None
for ext in exts:
path = f'{params.data_path}/{params.train_split}.{ext}'
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f'{params.data_path}/{params.train_split}.{ext_to_use}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}.{ext_to_use}'
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(train_data=[train_data_path], task='seq2seq', token=params.token, project_name=params.project_name, username=params.username, column_mapping={'text': params.text_column, 'label': params.target_column}, valid_data=[valid_data_path] if valid_data_path is not None else None, percent_valid=None, local=local, ext=ext_to_use)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.text_column = 'autotrain_text'
params.target_column = 'autotrain_label'
return params
def text_clf_munge_data(params, local):
exts = ['csv', 'jsonl']
ext_to_use = None
for ext in exts:
path = f'{params.data_path}/{params.train_split}.{ext}'
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f'{params.data_path}/{params.train_split}.{ext_to_use}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}.{ext_to_use}'
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(train_data=[train_data_path], valid_data=[valid_data_path] if valid_data_path is not None else None, task='text_multi_class_classification', token=params.token, project_name=params.project_name, username=params.username, column_mapping={'text': params.text_column, 'label': params.target_column}, percent_valid=None, local=local, convert_to_class_label=True, ext=ext_to_use)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.text_column = 'autotrain_text'
params.target_column = 'autotrain_label'
return params
def text_reg_munge_data(params, local):
exts = ['csv', 'jsonl']
ext_to_use = None
for ext in exts:
path = f'{params.data_path}/{params.train_split}.{ext}'
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f'{params.data_path}/{params.train_split}.{ext_to_use}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}.{ext_to_use}'
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(train_data=[train_data_path], valid_data=[valid_data_path] if valid_data_path is not None else None, task='text_single_column_regression', token=params.token, project_name=params.project_name, username=params.username, column_mapping={'text': params.text_column, 'label': params.target_column}, percent_valid=None, local=local, convert_to_class_label=False, ext=ext_to_use)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.text_column = 'autotrain_text'
params.target_column = 'autotrain_label'
return params
def token_clf_munge_data(params, local):
exts = ['csv', 'jsonl']
ext_to_use = None
for ext in exts:
path = f'{params.data_path}/{params.train_split}.{ext}'
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f'{params.data_path}/{params.train_split}.{ext_to_use}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}.{ext_to_use}'
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(train_data=[train_data_path], valid_data=[valid_data_path] if valid_data_path is not None else None, task='text_token_classification', token=params.token, project_name=params.project_name, username=params.username, column_mapping={'text': params.tokens_column, 'label': params.tags_column}, percent_valid=None, local=local, convert_to_class_label=True, ext=ext_to_use)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.text_column = 'autotrain_text'
params.target_column = 'autotrain_label'
return params
def img_clf_munge_data(params, local):
train_data_path = f'{params.data_path}/{params.train_split}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}'
else:
valid_data_path = None
if os.path.isdir(train_data_path):
dset = AutoTrainImageClassificationDataset(train_data=train_data_path, valid_data=valid_data_path, token=params.token, project_name=params.project_name, username=params.username, local=local)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.image_column = 'autotrain_image'
params.target_column = 'autotrain_label'
return params
def dreambooth_munge_data(params, local):
if os.path.isdir(params.image_path):
training_data = [os.path.join(params.image_path, f) for f in os.listdir(params.image_path)]
dset = AutoTrainDreamboothDataset(concept_images=training_data, concept_name=params.prompt, token=params.token, project_name=params.project_name, username=params.username, local=local)
params.image_path = dset.prepare()
return params
def img_obj_detect_munge_data(params, local):
train_data_path = f'{params.data_path}/{params.train_split}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}'
else:
valid_data_path = None
if os.path.isdir(train_data_path):
dset = AutoTrainObjectDetectionDataset(train_data=train_data_path, valid_data=valid_data_path, token=params.token, project_name=params.project_name, username=params.username, local=local)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.image_column = 'autotrain_image'
params.objects_column = 'autotrain_objects'
return params
def sent_transformers_munge_data(params, local):
exts = ['csv', 'jsonl']
ext_to_use = None
for ext in exts:
path = f'{params.data_path}/{params.train_split}.{ext}'
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f'{params.data_path}/{params.train_split}.{ext_to_use}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}.{ext_to_use}'
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(train_data=[train_data_path], valid_data=[valid_data_path] if valid_data_path is not None else None, task='sentence_transformers', token=params.token, project_name=params.project_name, username=params.username, column_mapping={'sentence1': params.sentence1_column, 'sentence2': params.sentence2_column, 'sentence3': params.sentence3_column, 'target': params.target_column}, percent_valid=None, local=local, convert_to_class_label=True if params.trainer == 'pair_class' else False, ext=ext_to_use)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.sentence1_column = 'autotrain_sentence1'
params.sentence2_column = 'autotrain_sentence2'
params.sentence3_column = 'autotrain_sentence3'
params.target_column = 'autotrain_target'
return params
def img_reg_munge_data(params, local):
train_data_path = f'{params.data_path}/{params.train_split}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}'
else:
valid_data_path = None
if os.path.isdir(train_data_path):
dset = AutoTrainImageRegressionDataset(train_data=train_data_path, valid_data=valid_data_path, token=params.token, project_name=params.project_name, username=params.username, local=local)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.image_column = 'autotrain_image'
params.target_column = 'autotrain_label'
return params
def vlm_munge_data(params, local):
train_data_path = f'{params.data_path}/{params.train_split}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}'
else:
valid_data_path = None
if os.path.exists(train_data_path):
col_map = {'text': params.text_column}
if params.prompt_text_column is not None:
col_map['prompt'] = params.prompt_text_column
dset = AutoTrainVLMDataset(train_data=train_data_path, token=params.token, project_name=params.project_name, username=params.username, column_mapping=col_map, valid_data=valid_data_path if valid_data_path is not None else None, percent_valid=None, local=local)
params.data_path = dset.prepare()
params.text_column = 'autotrain_text'
params.image_column = 'autotrain_image'
params.prompt_text_column = 'autotrain_prompt'
return params
def ext_qa_munge_data(params, local):
exts = ['csv', 'jsonl']
ext_to_use = None
for ext in exts:
path = f'{params.data_path}/{params.train_split}.{ext}'
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f'{params.data_path}/{params.train_split}.{ext_to_use}'
if params.valid_split is not None:
valid_data_path = f'{params.data_path}/{params.valid_split}.{ext_to_use}'
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(train_data=[train_data_path], valid_data=[valid_data_path] if valid_data_path is not None else None, task='text_extractive_question_answering', token=params.token, project_name=params.project_name, username=params.username, column_mapping={'text': params.text_column, 'question': params.question_column, 'answer': params.answer_column}, percent_valid=None, local=local, convert_to_class_label=True, ext=ext_to_use)
params.data_path = dset.prepare()
params.valid_split = 'validation'
params.text_column = 'autotrain_text'
params.question_column = 'autotrain_question'
params.answer_column = 'autotrain_answer'
return params
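# Illustrative sketch (not part of the original source): calling one of the munge helpers
# directly with a task params instance. Field values below are hypothetical and assume the
# layout described above (data/train.csv).
#
#   from autotrain.trainers.text_classification.params import TextClassificationParams
#   params = TextClassificationParams(model='bert-base-uncased', project_name='my-project',
#                                     data_path='data', text_column='review', target_column='sentiment')
#   params = text_clf_munge_data(params, local=True)
#   # params.data_path now points at the prepared dataset and
#   # params.text_column / params.target_column become 'autotrain_text' / 'autotrain_label'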
# File: autotrain-advanced-main/src/autotrain/commands.py
import os
import shlex
import torch
from autotrain import logger
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
from autotrain.trainers.generic.params import GenericParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.image_regression.params import ImageRegressionParams
from autotrain.trainers.object_detection.params import ObjectDetectionParams
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.trainers.text_regression.params import TextRegressionParams
from autotrain.trainers.token_classification.params import TokenClassificationParams
from autotrain.trainers.vlm.params import VLMTrainingParams
def launch_command(params):
params.project_name = shlex.split(params.project_name)[0]
cuda_available = torch.cuda.is_available()
mps_available = torch.backends.mps.is_available()
if cuda_available:
num_gpus = torch.cuda.device_count()
elif mps_available:
num_gpus = 1
else:
num_gpus = 0
if isinstance(params, LLMTrainingParams):
if num_gpus == 0:
logger.warning('No GPU found. Forcing training on CPU. This will be super slow!')
cmd = ['accelerate', 'launch', '--cpu']
elif num_gpus == 1:
cmd = ['accelerate', 'launch', '--num_machines', '1', '--num_processes', '1']
elif num_gpus == 2:
cmd = ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', '2']
elif params.quantization in ('int8', 'int4') and params.peft and (params.mixed_precision == 'bf16'):
cmd = ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', str(num_gpus)]
else:
cmd = ['accelerate', 'launch', '--use_deepspeed', '--zero_stage', '3', '--offload_optimizer_device', 'none', '--offload_param_device', 'none', '--zero3_save_16bit_model', 'true', '--zero3_init_flag', 'true', '--deepspeed_multinode_launcher', 'standard', '--gradient_accumulation_steps', str(params.gradient_accumulation)]
if num_gpus > 0:
cmd.append('--mixed_precision')
if params.mixed_precision == 'fp16':
cmd.append('fp16')
elif params.mixed_precision == 'bf16':
cmd.append('bf16')
else:
cmd.append('no')
cmd.extend(['-m', 'autotrain.trainers.clm', '--training_config', os.path.join(params.project_name, 'training_params.json')])
elif isinstance(params, DreamBoothTrainingParams):
cmd = ['python', '-m', 'autotrain.trainers.dreambooth', '--training_config', os.path.join(params.project_name, 'training_params.json')]
elif isinstance(params, GenericParams):
cmd = ['python', '-m', 'autotrain.trainers.generic', '--config', os.path.join(params.project_name, 'training_params.json')]
elif isinstance(params, TabularParams):
cmd = ['python', '-m', 'autotrain.trainers.tabular', '--training_config', os.path.join(params.project_name, 'training_params.json')]
elif isinstance(params, TextClassificationParams) or isinstance(params, TextRegressionParams) or isinstance(params, SentenceTransformersParams) or isinstance(params, ExtractiveQuestionAnsweringParams):
if num_gpus == 0:
cmd = ['accelerate', 'launch', '--cpu']
elif num_gpus == 1:
cmd = ['accelerate', 'launch', '--num_machines', '1', '--num_processes', '1']
else:
cmd = ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', str(num_gpus)]
if num_gpus > 0:
cmd.append('--mixed_precision')
if params.mixed_precision == 'fp16':
cmd.append('fp16')
elif params.mixed_precision == 'bf16':
cmd.append('bf16')
else:
cmd.append('no')
if isinstance(params, TextRegressionParams):
cmd.extend(['-m', 'autotrain.trainers.text_regression', '--training_config', os.path.join(params.project_name, 'training_params.json')])
elif isinstance(params, SentenceTransformersParams):
cmd.extend(['-m', 'autotrain.trainers.sent_transformers', '--training_config', os.path.join(params.project_name, 'training_params.json')])
elif isinstance(params, ExtractiveQuestionAnsweringParams):
cmd.extend(['-m', 'autotrain.trainers.extractive_question_answering', '--training_config', os.path.join(params.project_name, 'training_params.json')])
else:
cmd.extend(['-m', 'autotrain.trainers.text_classification', '--training_config', os.path.join(params.project_name, 'training_params.json')])
elif isinstance(params, TokenClassificationParams):
if num_gpus == 0:
cmd = ['accelerate', 'launch', '--cpu']
elif num_gpus == 1:
cmd = ['accelerate', 'launch', '--num_machines', '1', '--num_processes', '1']
else:
cmd = ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', str(num_gpus)]
if num_gpus > 0:
cmd.append('--mixed_precision')
if params.mixed_precision == 'fp16':
cmd.append('fp16')
elif params.mixed_precision == 'bf16':
cmd.append('bf16')
else:
cmd.append('no')
cmd.extend(['-m', 'autotrain.trainers.token_classification', '--training_config', os.path.join(params.project_name, 'training_params.json')])
elif isinstance(params, ImageClassificationParams) or isinstance(params, ObjectDetectionParams) or isinstance(params, ImageRegressionParams):
if num_gpus == 0:
cmd = ['accelerate', 'launch', '--cpu']
elif num_gpus == 1:
cmd = ['accelerate', 'launch', '--num_machines', '1', '--num_processes', '1']
else:
cmd = ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', str(num_gpus)]
if num_gpus > 0:
cmd.append('--mixed_precision')
if params.mixed_precision == 'fp16':
cmd.append('fp16')
elif params.mixed_precision == 'bf16':
cmd.append('bf16')
else:
cmd.append('no')
if isinstance(params, ObjectDetectionParams):
cmd.extend(['-m', 'autotrain.trainers.object_detection', '--training_config', os.path.join(params.project_name, 'training_params.json')])
elif isinstance(params, ImageRegressionParams):
cmd.extend(['-m', 'autotrain.trainers.image_regression', '--training_config', os.path.join(params.project_name, 'training_params.json')])
else:
cmd.extend(['-m', 'autotrain.trainers.image_classification', '--training_config', os.path.join(params.project_name, 'training_params.json')])
elif isinstance(params, Seq2SeqParams):
if num_gpus == 0:
logger.warning('No GPU found. Forcing training on CPU. This will be super slow!')
cmd = ['accelerate', 'launch', '--cpu']
elif num_gpus == 1:
cmd = ['accelerate', 'launch', '--num_machines', '1', '--num_processes', '1']
elif num_gpus == 2:
cmd = ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', '2']
elif params.quantization in ('int8', 'int4') and params.peft and (params.mixed_precision == 'bf16'):
cmd = ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', str(num_gpus)]
else:
cmd = ['accelerate', 'launch', '--use_deepspeed', '--zero_stage', '3', '--offload_optimizer_device', 'none', '--offload_param_device', 'none', '--zero3_save_16bit_model', 'true', '--zero3_init_flag', 'true', '--deepspeed_multinode_launcher', 'standard', '--gradient_accumulation_steps', str(params.gradient_accumulation)]
if num_gpus > 0:
cmd.append('--mixed_precision')
if params.mixed_precision == 'fp16':
cmd.append('fp16')
elif params.mixed_precision == 'bf16':
cmd.append('bf16')
else:
cmd.append('no')
cmd.extend(['-m', 'autotrain.trainers.seq2seq', '--training_config', os.path.join(params.project_name, 'training_params.json')])
elif isinstance(params, VLMTrainingParams):
if num_gpus == 0:
logger.warning('No GPU found. Forcing training on CPU. This will be super slow!')
cmd = ['accelerate', 'launch', '--cpu']
elif num_gpus == 1:
cmd = ['accelerate', 'launch', '--num_machines', '1', '--num_processes', '1']
elif num_gpus == 2:
cmd = ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', '2']
elif params.quantization in ('int8', 'int4') and params.peft and (params.mixed_precision == 'bf16'):
cmd = ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', str(num_gpus)]
else:
cmd = ['accelerate', 'launch', '--use_deepspeed', '--zero_stage', '3', '--offload_optimizer_device', 'none', '--offload_param_device', 'none', '--zero3_save_16bit_model', 'true', '--zero3_init_flag', 'true', '--deepspeed_multinode_launcher', 'standard', '--gradient_accumulation_steps', str(params.gradient_accumulation)]
if num_gpus > 0:
cmd.append('--mixed_precision')
if params.mixed_precision == 'fp16':
cmd.append('fp16')
elif params.mixed_precision == 'bf16':
cmd.append('bf16')
else:
cmd.append('no')
cmd.extend(['-m', 'autotrain.trainers.vlm', '--training_config', os.path.join(params.project_name, 'training_params.json')])
else:
raise ValueError('Unsupported params type')
logger.info(cmd)
logger.info(params)
return cmd
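# Illustrative sketch (not part of the original source): what launch_command returns for a
# hypothetical single-GPU LLM run; field values are assumed.
#
#   from autotrain.trainers.clm.params import LLMTrainingParams
#   params = LLMTrainingParams(model='meta-llama/Llama-2-7b-hf', project_name='my-llm',
#                              data_path='data', mixed_precision='bf16')
#   launch_command(params)
#   # -> ['accelerate', 'launch', '--num_machines', '1', '--num_processes', '1',
#   #     '--mixed_precision', 'bf16', '-m', 'autotrain.trainers.clm',
#   #     '--training_config', 'my-llm/training_params.json']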
# File: autotrain-advanced-main/src/autotrain/dataset.py
import io
import os
import uuid
import zipfile
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
import pandas as pd
from autotrain.preprocessor.dreambooth import DreamboothPreprocessor
from autotrain.preprocessor.tabular import TabularBinaryClassificationPreprocessor, TabularMultiClassClassificationPreprocessor, TabularMultiColumnRegressionPreprocessor, TabularMultiLabelClassificationPreprocessor, TabularSingleColumnRegressionPreprocessor
from autotrain.preprocessor.text import LLMPreprocessor, SentenceTransformersPreprocessor, Seq2SeqPreprocessor, TextBinaryClassificationPreprocessor, TextExtractiveQuestionAnsweringPreprocessor, TextMultiClassClassificationPreprocessor, TextSingleColumnRegressionPreprocessor, TextTokenClassificationPreprocessor
from autotrain.preprocessor.vision import ImageClassificationPreprocessor, ImageRegressionPreprocessor, ObjectDetectionPreprocessor
from autotrain.preprocessor.vlm import VLMPreprocessor
def remove_non_image_files(folder):
allowed_extensions = {'.jpg', '.jpeg', '.png', '.JPG', '.JPEG', '.PNG', '.jsonl'}
for (root, dirs, files) in os.walk(folder):
for file in files:
file_extension = os.path.splitext(file)[1]
if file_extension.lower() not in allowed_extensions:
file_path = os.path.join(root, file)
os.remove(file_path)
print(f'Removed file: {file_path}')
for subfolder in dirs:
remove_non_image_files(os.path.join(root, subfolder))
@dataclass
class AutoTrainDreamboothDataset:
concept_images: List[Any]
concept_name: str
token: str
project_name: str
username: Optional[str] = None
local: bool = False
def __str__(self) -> str:
info = f'Dataset: {self.project_name} ({self.task})\n'
return info
def __post_init__(self):
self.task = 'dreambooth'
@property
def num_samples(self):
return len(self.concept_images)
def prepare(self):
preprocessor = DreamboothPreprocessor(concept_images=self.concept_images, concept_name=self.concept_name, token=self.token, project_name=self.project_name, username=self.username, local=self.local)
return preprocessor.prepare()
@dataclass
class AutoTrainImageClassificationDataset:
train_data: str
token: str
project_name: str
username: str
valid_data: Optional[str] = None
percent_valid: Optional[float] = None
local: bool = False
def __str__(self) -> str:
info = f'Dataset: {self.project_name} ({self.task})\n'
info += f'Train data: {self.train_data}\n'
info += f'Valid data: {self.valid_data}\n'
return info
def __post_init__(self):
self.task = 'image_multi_class_classification'
if not self.valid_data and self.percent_valid is None:
self.percent_valid = 0.2
elif self.valid_data and self.percent_valid is not None:
raise ValueError('You can only specify one of valid_data or percent_valid')
elif self.valid_data:
self.percent_valid = 0.0
def prepare(self):
valid_dir = None
if not isinstance(self.train_data, str):
cache_dir = os.environ.get('HF_HOME')
if not cache_dir:
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface')
random_uuid = uuid.uuid4()
train_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
os.makedirs(train_dir, exist_ok=True)
self.train_data.seek(0)
content = self.train_data.read()
bytes_io = io.BytesIO(content)
zip_ref = zipfile.ZipFile(bytes_io, 'r')
zip_ref.extractall(train_dir)
macosx_dir = os.path.join(train_dir, '__MACOSX')
if os.path.exists(macosx_dir):
os.system(f'rm -rf {macosx_dir}')
remove_non_image_files(train_dir)
if self.valid_data:
random_uuid = uuid.uuid4()
valid_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
os.makedirs(valid_dir, exist_ok=True)
self.valid_data.seek(0)
content = self.valid_data.read()
bytes_io = io.BytesIO(content)
zip_ref = zipfile.ZipFile(bytes_io, 'r')
zip_ref.extractall(valid_dir)
macosx_dir = os.path.join(valid_dir, '__MACOSX')
if os.path.exists(macosx_dir):
os.system(f'rm -rf {macosx_dir}')
remove_non_image_files(valid_dir)
else:
train_dir = self.train_data
if self.valid_data:
valid_dir = self.valid_data
preprocessor = ImageClassificationPreprocessor(train_data=train_dir, valid_data=valid_dir, token=self.token, project_name=self.project_name, username=self.username, local=self.local)
return preprocessor.prepare()
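# Note (added comment): train_data and valid_data may be either directory paths or uploaded
# zip file objects; in the latter case the archives are extracted into a per-run folder under
# the Hugging Face cache before preprocessing, __MACOSX leftovers are deleted, and non-image
# files are removed. The same pattern is repeated for the object detection, VLM and image
# regression datasets below.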
@dataclass
class AutoTrainObjectDetectionDataset:
train_data: str
token: str
project_name: str
username: str
valid_data: Optional[str] = None
percent_valid: Optional[float] = None
local: bool = False
def __str__(self) -> str:
info = f'Dataset: {self.project_name} ({self.task})\n'
info += f'Train data: {self.train_data}\n'
info += f'Valid data: {self.valid_data}\n'
return info
def __post_init__(self):
self.task = 'image_object_detection'
if not self.valid_data and self.percent_valid is None:
self.percent_valid = 0.2
elif self.valid_data and self.percent_valid is not None:
raise ValueError('You can only specify one of valid_data or percent_valid')
elif self.valid_data:
self.percent_valid = 0.0
def prepare(self):
valid_dir = None
if not isinstance(self.train_data, str):
cache_dir = os.environ.get('HF_HOME')
if not cache_dir:
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface')
random_uuid = uuid.uuid4()
train_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
os.makedirs(train_dir, exist_ok=True)
self.train_data.seek(0)
content = self.train_data.read()
bytes_io = io.BytesIO(content)
zip_ref = zipfile.ZipFile(bytes_io, 'r')
zip_ref.extractall(train_dir)
macosx_dir = os.path.join(train_dir, '__MACOSX')
if os.path.exists(macosx_dir):
os.system(f'rm -rf {macosx_dir}')
remove_non_image_files(train_dir)
if self.valid_data:
random_uuid = uuid.uuid4()
valid_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
os.makedirs(valid_dir, exist_ok=True)
self.valid_data.seek(0)
content = self.valid_data.read()
bytes_io = io.BytesIO(content)
zip_ref = zipfile.ZipFile(bytes_io, 'r')
zip_ref.extractall(valid_dir)
macosx_dir = os.path.join(valid_dir, '__MACOSX')
if os.path.exists(macosx_dir):
os.system(f'rm -rf {macosx_dir}')
remove_non_image_files(valid_dir)
else:
train_dir = self.train_data
if self.valid_data:
valid_dir = self.valid_data
preprocessor = ObjectDetectionPreprocessor(train_data=train_dir, valid_data=valid_dir, token=self.token, project_name=self.project_name, username=self.username, local=self.local)
return preprocessor.prepare()
@dataclass
class AutoTrainVLMDataset:
train_data: str
token: str
project_name: str
username: str
column_mapping: Dict[str, str]
valid_data: Optional[str] = None
percent_valid: Optional[float] = None
local: bool = False
def __str__(self) -> str:
info = f'Dataset: {self.project_name} ({self.task})\n'
info += f'Train data: {self.train_data}\n'
info += f'Valid data: {self.valid_data}\n'
return info
def __post_init__(self):
self.task = 'vlm'
if not self.valid_data and self.percent_valid is None:
self.percent_valid = 0.2
elif self.valid_data and self.percent_valid is not None:
raise ValueError('You can only specify one of valid_data or percent_valid')
elif self.valid_data:
self.percent_valid = 0.0
def prepare(self):
valid_dir = None
if not isinstance(self.train_data, str):
cache_dir = os.environ.get('HF_HOME')
if not cache_dir:
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface')
random_uuid = uuid.uuid4()
train_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
os.makedirs(train_dir, exist_ok=True)
self.train_data.seek(0)
content = self.train_data.read()
bytes_io = io.BytesIO(content)
zip_ref = zipfile.ZipFile(bytes_io, 'r')
zip_ref.extractall(train_dir)
macosx_dir = os.path.join(train_dir, '__MACOSX')
if os.path.exists(macosx_dir):
os.system(f'rm -rf {macosx_dir}')
remove_non_image_files(train_dir)
if self.valid_data:
random_uuid = uuid.uuid4()
valid_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
os.makedirs(valid_dir, exist_ok=True)
self.valid_data.seek(0)
content = self.valid_data.read()
bytes_io = io.BytesIO(content)
zip_ref = zipfile.ZipFile(bytes_io, 'r')
zip_ref.extractall(valid_dir)
macosx_dir = os.path.join(valid_dir, '__MACOSX')
if os.path.exists(macosx_dir):
os.system(f'rm -rf {macosx_dir}')
remove_non_image_files(valid_dir)
else:
train_dir = self.train_data
if self.valid_data:
valid_dir = self.valid_data
preprocessor = VLMPreprocessor(train_data=train_dir, valid_data=valid_dir, token=self.token, project_name=self.project_name, username=self.username, local=self.local, column_mapping=self.column_mapping)
return preprocessor.prepare()
@dataclass
class AutoTrainImageRegressionDataset:
train_data: str
token: str
project_name: str
username: str
valid_data: Optional[str] = None
percent_valid: Optional[float] = None
local: bool = False
def __str__(self) -> str:
info = f'Dataset: {self.project_name} ({self.task})\n'
info += f'Train data: {self.train_data}\n'
info += f'Valid data: {self.valid_data}\n'
return info
def __post_init__(self):
self.task = 'image_single_column_regression'
if not self.valid_data and self.percent_valid is None:
self.percent_valid = 0.2
elif self.valid_data and self.percent_valid is not None:
raise ValueError('You can only specify one of valid_data or percent_valid')
elif self.valid_data:
self.percent_valid = 0.0
def prepare(self):
valid_dir = None
if not isinstance(self.train_data, str):
cache_dir = os.environ.get('HF_HOME')
if not cache_dir:
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface')
random_uuid = uuid.uuid4()
train_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
os.makedirs(train_dir, exist_ok=True)
self.train_data.seek(0)
content = self.train_data.read()
bytes_io = io.BytesIO(content)
zip_ref = zipfile.ZipFile(bytes_io, 'r')
zip_ref.extractall(train_dir)
macosx_dir = os.path.join(train_dir, '__MACOSX')
if os.path.exists(macosx_dir):
os.system(f'rm -rf {macosx_dir}')
remove_non_image_files(train_dir)
if self.valid_data:
random_uuid = uuid.uuid4()
valid_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
os.makedirs(valid_dir, exist_ok=True)
self.valid_data.seek(0)
content = self.valid_data.read()
bytes_io = io.BytesIO(content)
zip_ref = zipfile.ZipFile(bytes_io, 'r')
zip_ref.extractall(valid_dir)
macosx_dir = os.path.join(valid_dir, '__MACOSX')
if os.path.exists(macosx_dir):
os.system(f'rm -rf {macosx_dir}')
remove_non_image_files(valid_dir)
else:
train_dir = self.train_data
if self.valid_data:
valid_dir = self.valid_data
preprocessor = ImageRegressionPreprocessor(train_data=train_dir, valid_data=valid_dir, token=self.token, project_name=self.project_name, username=self.username, local=self.local)
return preprocessor.prepare()
@dataclass
class AutoTrainDataset:
train_data: List[str]
task: str
token: str
project_name: str
username: Optional[str] = None
column_mapping: Optional[Dict[str, str]] = None
valid_data: Optional[List[str]] = None
percent_valid: Optional[float] = None
convert_to_class_label: Optional[bool] = False
local: bool = False
ext: Optional[str] = 'csv'
def __str__(self) -> str:
info = f'Dataset: {self.project_name} ({self.task})\n'
info += f'Train data: {self.train_data}\n'
info += f'Valid data: {self.valid_data}\n'
info += f'Column mapping: {self.column_mapping}\n'
return info
def __post_init__(self):
if self.valid_data is None:
self.valid_data = []
if not self.valid_data and self.percent_valid is None:
self.percent_valid = 0.2
elif self.valid_data and self.percent_valid is not None:
raise ValueError('You can only specify one of valid_data or percent_valid')
elif self.valid_data:
self.percent_valid = 0.0
(self.train_df, self.valid_df) = self._preprocess_data()
def _preprocess_data(self):
train_df = []
for file in self.train_data:
if isinstance(file, pd.DataFrame):
train_df.append(file)
elif self.ext == 'jsonl':
train_df.append(pd.read_json(file, lines=True))
else:
train_df.append(pd.read_csv(file))
if len(train_df) > 1:
train_df = pd.concat(train_df)
else:
train_df = train_df[0]
valid_df = None
if len(self.valid_data) > 0:
valid_df = []
for file in self.valid_data:
if isinstance(file, pd.DataFrame):
valid_df.append(file)
elif self.ext == 'jsonl':
valid_df.append(pd.read_json(file, lines=True))
else:
valid_df.append(pd.read_csv(file))
if len(valid_df) > 1:
valid_df = pd.concat(valid_df)
else:
valid_df = valid_df[0]
return (train_df, valid_df)
@property
def num_samples(self):
return len(self.train_df) + len(self.valid_df) if self.valid_df is not None else len(self.train_df)
def prepare(self):
if self.task == 'text_binary_classification':
text_column = self.column_mapping['text']
label_column = self.column_mapping['label']
preprocessor = TextBinaryClassificationPreprocessor(train_data=self.train_df, text_column=text_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, convert_to_class_label=self.convert_to_class_label, local=self.local)
return preprocessor.prepare()
elif self.task == 'text_multi_class_classification':
text_column = self.column_mapping['text']
label_column = self.column_mapping['label']
preprocessor = TextMultiClassClassificationPreprocessor(train_data=self.train_df, text_column=text_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, convert_to_class_label=self.convert_to_class_label, local=self.local)
return preprocessor.prepare()
elif self.task == 'text_token_classification':
text_column = self.column_mapping['text']
label_column = self.column_mapping['label']
preprocessor = TextTokenClassificationPreprocessor(train_data=self.train_df, text_column=text_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, convert_to_class_label=self.convert_to_class_label)
return preprocessor.prepare()
elif self.task == 'text_single_column_regression':
text_column = self.column_mapping['text']
label_column = self.column_mapping['label']
preprocessor = TextSingleColumnRegressionPreprocessor(train_data=self.train_df, text_column=text_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local)
return preprocessor.prepare()
elif self.task == 'seq2seq':
text_column = self.column_mapping['text']
label_column = self.column_mapping['label']
preprocessor = Seq2SeqPreprocessor(train_data=self.train_df, text_column=text_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local)
return preprocessor.prepare()
elif self.task == 'lm_training':
text_column = self.column_mapping['text']
prompt_column = self.column_mapping.get('prompt')
rejected_text_column = self.column_mapping.get('rejected_text')
preprocessor = LLMPreprocessor(train_data=self.train_df, text_column=text_column, prompt_column=prompt_column, rejected_text_column=rejected_text_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local)
return preprocessor.prepare()
elif self.task == 'sentence_transformers':
sentence1_column = self.column_mapping['sentence1']
sentence2_column = self.column_mapping['sentence2']
sentence3_column = self.column_mapping.get('sentence3')
target_column = self.column_mapping.get('target')
preprocessor = SentenceTransformersPreprocessor(train_data=self.train_df, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, sentence1_column=sentence1_column, sentence2_column=sentence2_column, sentence3_column=sentence3_column, target_column=target_column, convert_to_class_label=self.convert_to_class_label)
return preprocessor.prepare()
elif self.task == 'text_extractive_question_answering':
text_column = self.column_mapping['text']
question_column = self.column_mapping['question']
answer_column = self.column_mapping['answer']
preprocessor = TextExtractiveQuestionAnsweringPreprocessor(train_data=self.train_df, text_column=text_column, question_column=question_column, answer_column=answer_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local)
return preprocessor.prepare()
elif self.task == 'tabular_binary_classification':
id_column = self.column_mapping['id']
label_column = self.column_mapping['label'][0]
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularBinaryClassificationPreprocessor(train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local)
return preprocessor.prepare()
elif self.task == 'tabular_multi_class_classification':
id_column = self.column_mapping['id']
label_column = self.column_mapping['label'][0]
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularMultiClassClassificationPreprocessor(train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local)
return preprocessor.prepare()
elif self.task == 'tabular_single_column_regression':
id_column = self.column_mapping['id']
label_column = self.column_mapping['label'][0]
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularSingleColumnRegressionPreprocessor(train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local)
return preprocessor.prepare()
elif self.task == 'tabular_multi_column_regression':
id_column = self.column_mapping['id']
label_column = self.column_mapping['label']
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularMultiColumnRegressionPreprocessor(train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local)
return preprocessor.prepare()
elif self.task == 'tabular_multi_label_classification':
id_column = self.column_mapping['id']
label_column = self.column_mapping['label']
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularMultiLabelClassificationPreprocessor(train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local)
return preprocessor.prepare()
else:
raise ValueError(f'Task {self.task} not supported')
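# Illustrative sketch (not part of the original source): preparing a local CSV for multi-class
# text classification. Paths and dataset column names are hypothetical.
#
#   dset = AutoTrainDataset(
#       train_data=['data/train.csv'],
#       task='text_multi_class_classification',
#       token=None,
#       project_name='my-project',
#       username=None,
#       column_mapping={'text': 'review', 'label': 'sentiment'},
#       valid_data=['data/valid.csv'],
#       percent_valid=None,
#       local=True,
#       convert_to_class_label=True,
#       ext='csv',
#   )
#   prepared_path = dset.prepare()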
# File: autotrain-advanced-main/src/autotrain/help.py
autotrain_user_info = '\nPlease choose the user or organization who is creating the AutoTrain Project.\nIn case of non-free tier, this user or organization will be billed.\n'
project_name_info = 'A unique name for the AutoTrain Project.\nThis name will be used to identify the project in the AutoTrain dashboard.'
column_mapping_info = '\nColumn Mapping is used to map the columns in the dataset to the columns in the AutoTrain Project.\nFor example, if your dataset has a column named "input" and you want to use it as the input for the model,\nyou can map it to the "text" column in the AutoTrain Project.\nSimilarly, if your dataset has a column named "label" and you want to use it as the label for the model,\nyou can map it to the "target" column in the AutoTrain Project.\nColumn mapping keys are AutoTrain Project column names and values are your dataset column names.\nFor tabular datasets, you can map multiple targets to the "label" column. This will enable the multi-label task.\nThe column names must be a comma separated list.\nFor other tasks, mappings are one-to-one.\nNote: column names are case sensitive.\n'
base_model_info = '\nBase Model is the model that will be used for fine-tuning.\nFor example, if you are training a text classification model, you can choose a base model like "bert-base-uncased".\nFor a list of available models, please see the Hugging Face Model Hub.\nNote: not all models listed here are going to be compatible with\nyour data and parameters. You should select a model that is compatible with your task, data and parameters.\nDon\'t see your favorite model? You can also use a custom model by providing the model name in an environment variable: AUTOTRAIN_CUSTOM_MODELS.\nFor example, go to settings and add a new environment variable with the key AUTOTRAIN_CUSTOM_MODELS and value as the model name (e.g. google/gemma-7b)\n'
hardware_info = '\n\nHardware is the machine that will be used for training.\nPlease choose hardware that is compatible with your task, data and parameters.\n'
task_info = '\nTask is the type of model you want to train.\nPlease choose a task that is compatible with your data and parameters.\nFor example, if you are training a text classification model, you can choose the "Text Classification" task.\n'
APP_IMAGE_CLASSIFICATION_DATA_HELP = 'The data for the Image Classification task should be in the following format:\n- The data should be in a zip file.\n- The zip file should contain multiple folders (the classes), each folder should contain images of a single class.\n- The name of the folder should be the name of the class.\n- The images must be jpeg, jpg or png.\n- There should be at least 5 images per class.\n- There should not be any other files in the zip file.\n- There should not be any other folders inside the zip folder.\n'
APP_LM_TRAINING_TYPE = 'There are two types of Language Model Training:\n- generic\n- chat\n\nIn the generic mode, you provide a CSV with a text column which has already been formatted by you for training a language model.\nIn the chat mode, you provide a CSV with two or three text columns: prompt, context (optional) and response.\nContext column can be empty for samples if not needed. You can also have a "prompt start" column. If provided, "prompt start" will be prepended before the prompt column.\n\nPlease see [this](https://huggingface.co/datasets/tatsu-lab/alpaca) dataset which has both formats in the same dataset.\n'
def get_app_help(element_id):
if element_id == 'autotrain_user_info':
return autotrain_user_info
elif element_id == 'project_name_info':
return project_name_info
elif element_id == 'column_mapping_info':
return column_mapping_info
elif element_id == 'base_model_info':
return base_model_info
elif element_id == 'hardware_info':
return hardware_info
elif element_id == 'task_info':
return task_info
else:
return 'No help available for this element.'
# File: autotrain-advanced-main/src/autotrain/logging.py
import sys
from dataclasses import dataclass
from accelerate.state import PartialState
from loguru import logger
@dataclass
class Logger:
def __post_init__(self):
self.log_format = '{level: <8} | {time:YYYY-MM-DD HH:mm:ss} | {name}:{function}:{line} - {message}'
self.logger = logger
self.setup_logger()
def _should_log(self, record):
return PartialState().is_main_process
def setup_logger(self):
self.logger.remove()
self.logger.add(sys.stdout, format=self.log_format, filter=lambda x: self._should_log(x))
def get_logger(self):
return self.logger
# File: autotrain-advanced-main/src/autotrain/parser.py
import os
from dataclasses import dataclass
import requests
import yaml
from autotrain import logger
from autotrain.cli.utils import dreambooth_munge_data, ext_qa_munge_data, img_clf_munge_data, img_obj_detect_munge_data, img_reg_munge_data, llm_munge_data, sent_transformers_munge_data, seq2seq_munge_data, tabular_munge_data, text_clf_munge_data, text_reg_munge_data, token_clf_munge_data, vlm_munge_data
from autotrain.project import AutoTrainProject
from autotrain.tasks import TASKS
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.image_regression.params import ImageRegressionParams
from autotrain.trainers.object_detection.params import ObjectDetectionParams
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.trainers.text_regression.params import TextRegressionParams
from autotrain.trainers.token_classification.params import TokenClassificationParams
from autotrain.trainers.vlm.params import VLMTrainingParams
@dataclass
class AutoTrainConfigParser:
config_path: str
def __post_init__(self):
if self.config_path.startswith('http'):
response = requests.get(self.config_path)
if response.status_code == 200:
self.config = yaml.safe_load(response.content)
else:
raise ValueError('Failed to retrieve YAML file.')
else:
with open(self.config_path, 'r') as f:
self.config = yaml.safe_load(f)
self.task_param_map = {'lm_training': LLMTrainingParams, 'dreambooth': DreamBoothTrainingParams, 'image_binary_classification': ImageClassificationParams, 'image_multi_class_classification': ImageClassificationParams, 'image_object_detection': ObjectDetectionParams, 'seq2seq': Seq2SeqParams, 'tabular': TabularParams, 'text_binary_classification': TextClassificationParams, 'text_multi_class_classification': TextClassificationParams, 'text_single_column_regression': TextRegressionParams, 'text_token_classification': TokenClassificationParams, 'sentence_transformers': SentenceTransformersParams, 'image_single_column_regression': ImageRegressionParams, 'vlm': VLMTrainingParams, 'text_extractive_question_answering': ExtractiveQuestionAnsweringParams}
self.munge_data_map = {'lm_training': llm_munge_data, 'dreambooth': dreambooth_munge_data, 'tabular': tabular_munge_data, 'seq2seq': seq2seq_munge_data, 'image_multi_class_classification': img_clf_munge_data, 'image_object_detection': img_obj_detect_munge_data, 'text_multi_class_classification': text_clf_munge_data, 'text_token_classification': token_clf_munge_data, 'text_single_column_regression': text_reg_munge_data, 'sentence_transformers': sent_transformers_munge_data, 'image_single_column_regression': img_reg_munge_data, 'vlm': vlm_munge_data, 'text_extractive_question_answering': ext_qa_munge_data}
self.task_aliases = {'llm': 'lm_training', 'llm-sft': 'lm_training', 'llm-orpo': 'lm_training', 'llm-generic': 'lm_training', 'llm-dpo': 'lm_training', 'llm-reward': 'lm_training', 'dreambooth': 'dreambooth', 'image_binary_classification': 'image_multi_class_classification', 'image-binary-classification': 'image_multi_class_classification', 'image_classification': 'image_multi_class_classification', 'image-classification': 'image_multi_class_classification', 'seq2seq': 'seq2seq', 'tabular': 'tabular', 'text_binary_classification': 'text_multi_class_classification', 'text-binary-classification': 'text_multi_class_classification', 'text_classification': 'text_multi_class_classification', 'text-classification': 'text_multi_class_classification', 'text_single_column_regression': 'text_single_column_regression', 'text-single-column-regression': 'text_single_column_regression', 'text_regression': 'text_single_column_regression', 'text-regression': 'text_single_column_regression', 'token_classification': 'text_token_classification', 'token-classification': 'text_token_classification', 'image_object_detection': 'image_object_detection', 'image-object-detection': 'image_object_detection', 'object_detection': 'image_object_detection', 'object-detection': 'image_object_detection', 'st': 'sentence_transformers', 'st:pair': 'sentence_transformers', 'st:pair_class': 'sentence_transformers', 'st:pair_score': 'sentence_transformers', 'st:triplet': 'sentence_transformers', 'st:qa': 'sentence_transformers', 'sentence-transformers:pair': 'sentence_transformers', 'sentence-transformers:pair_class': 'sentence_transformers', 'sentence-transformers:pair_score': 'sentence_transformers', 'sentence-transformers:triplet': 'sentence_transformers', 'sentence-transformers:qa': 'sentence_transformers', 'image_single_column_regression': 'image_single_column_regression', 'image-single-column-regression': 'image_single_column_regression', 'image_regression': 'image_single_column_regression', 'image-regression': 'image_single_column_regression', 'image-scoring': 'image_single_column_regression', 'vlm:captioning': 'vlm', 'vlm:vqa': 'vlm', 'extractive_question_answering': 'text_extractive_question_answering', 'ext_qa': 'text_extractive_question_answering', 'ext-qa': 'text_extractive_question_answering', 'extractive-qa': 'text_extractive_question_answering'}
task = self.config.get('task')
self.task = self.task_aliases.get(task, task)
if self.task is None:
raise ValueError('Task is required in the configuration file')
if self.task not in TASKS:
raise ValueError(f'Task `{self.task}` is not supported')
self.backend = self.config.get('backend')
if self.backend is None:
raise ValueError('Backend is required in the configuration file')
logger.info(f'Running task: {self.task}')
logger.info(f'Using backend: {self.backend}')
self.parsed_config = self._parse_config()
def _parse_config(self):
params = {'model': self.config['base_model'], 'project_name': self.config['project_name']}
if self.task == 'dreambooth':
params['image_path'] = self.config['data']['path']
params['prompt'] = self.config['data']['prompt']
else:
params['data_path'] = self.config['data']['path']
if self.task == 'lm_training':
params['chat_template'] = self.config['data']['chat_template']
if '-' in self.config['task']:
params['trainer'] = self.config['task'].split('-')[1]
if params['trainer'] == 'generic':
params['trainer'] = 'default'
if params['trainer'] not in ['sft', 'orpo', 'dpo', 'reward', 'default']:
raise ValueError('Invalid LLM training task')
if self.task == 'sentence_transformers':
params['trainer'] = self.config['task'].split(':')[1]
if self.task == 'vlm':
params['trainer'] = self.config['task'].split(':')[1]
if self.task != 'dreambooth':
for (k, v) in self.config['data']['column_mapping'].items():
params[k] = v
params['train_split'] = self.config['data']['train_split']
params['valid_split'] = self.config['data']['valid_split']
params['log'] = self.config['log']
if 'hub' in self.config:
params['username'] = self.config['hub']['username']
params['token'] = self.config['hub']['token']
params['push_to_hub'] = self.config['hub']['push_to_hub']
else:
params['username'] = None
params['token'] = None
params['push_to_hub'] = False
if params['username']:
if params['username'].startswith('${'):
params['username'] = os.environ.get(params['username'][2:-1])
if params['token']:
if params['token'].startswith('${'):
params['token'] = os.environ.get(params['token'][2:-1])
other_params = self.config.get('params')
if other_params:
params.update(other_params)
return params
def run(self):
_params = self.task_param_map[self.task](**self.parsed_config)
logger.info(_params)
_munge_fn = self.munge_data_map[self.task]
_munge_fn(_params, local=self.backend.startswith('local'))
project = AutoTrainProject(params=_params, backend=self.backend)
job_id = project.create()
logger.info(f'Job ID: {job_id}')
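# Config shape sketch (illustrative, not part of the original file): `_parse_config`
# above only reads keys from a nested mapping, so a config for a text classification
# run would look roughly like the dict below. All concrete values, and the
# column-mapping keys 'text_column' / 'target_column' and the names under 'params',
# are assumptions for illustration; '${...}' hub values are resolved from environment
# variables by the code above.
example_config = {
    'task': 'text_classification',  # resolved through task_aliases above
    'backend': 'local',
    'base_model': 'bert-base-uncased',
    'project_name': 'my-autotrain-project',
    'log': 'tensorboard',
    'data': {
        'path': 'imdb',
        'train_split': 'train',
        'valid_split': 'test',
        'column_mapping': {'text_column': 'text', 'target_column': 'label'},
    },
    'hub': {
        'username': '${HF_USERNAME}',
        'token': '${HF_TOKEN}',
        'push_to_hub': True,
    },
    'params': {'epochs': 3, 'lr': 2e-5},  # merged into the trainer params as-is
}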
# File: autotrain-advanced-main/src/autotrain/preprocessor/dreambooth.py
import io
import json
import os
from dataclasses import dataclass
from typing import Any, List
from huggingface_hub import HfApi, create_repo
from autotrain import logger
@dataclass
class DreamboothPreprocessor:
concept_images: List[Any]
concept_name: str
username: str
project_name: str
token: str
local: bool
def __post_init__(self):
self.repo_name = f'{self.username}/autotrain-data-{self.project_name}'
if not self.local:
try:
create_repo(repo_id=self.repo_name, repo_type='dataset', token=self.token, private=True, exist_ok=False)
except Exception:
logger.error('Error creating repo')
raise ValueError('Error creating repo')
def _upload_concept_images(self, file, api):
logger.info(f'Uploading {file} to concept1')
if isinstance(file, str):
path_in_repo = f"concept1/{file.split('/')[-1]}"
else:
path_in_repo = f"concept1/{file.filename.split('/')[-1]}"
api.upload_file(path_or_fileobj=file if isinstance(file, str) else file.file.read(), path_in_repo=path_in_repo, repo_id=self.repo_name, repo_type='dataset', token=self.token)
def _upload_concept_prompts(self, api):
_prompts = {}
_prompts['concept1'] = self.concept_name
prompts = json.dumps(_prompts)
prompts = prompts.encode('utf-8')
prompts = io.BytesIO(prompts)
api.upload_file(path_or_fileobj=prompts, path_in_repo='prompts.json', repo_id=self.repo_name, repo_type='dataset', token=self.token)
def _save_concept_images(self, file):
logger.info('Saving concept images')
logger.info(file)
if isinstance(file, str):
_file = file
path = f"{self.project_name}/autotrain-data/concept1/{_file.split('/')[-1]}"
else:
_file = file.file.read()
path = f"{self.project_name}/autotrain-data/concept1/{file.filename.split('/')[-1]}"
os.makedirs(os.path.dirname(path), exist_ok=True)
if isinstance(file, str):
with open(_file, 'rb') as f:
with open(path, 'wb') as f2:
f2.write(f.read())
else:
with open(path, 'wb') as f:
f.write(_file)
def _save_concept_prompts(self):
_prompts = {}
_prompts['concept1'] = self.concept_name
path = f'{self.project_name}/autotrain-data/prompts.json'
with open(path, 'w', encoding='utf-8') as f:
json.dump(_prompts, f)
def prepare(self):
api = HfApi(token=self.token)
for _file in self.concept_images:
if self.local:
self._save_concept_images(_file)
else:
self._upload_concept_images(_file, api)
if self.local:
self._save_concept_prompts()
else:
self._upload_concept_prompts(api)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
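# Usage sketch (illustrative, not part of the original module): with local=True the
# images are copied under '<project_name>/autotrain-data/concept1' and a prompts.json
# is written next to them; the file paths and token below are made up.
preprocessor = DreamboothPreprocessor(
    concept_images=['photos/dog1.jpg', 'photos/dog2.jpg'],  # str paths or uploaded file objects
    concept_name='photo of sks dog',
    username='my-username',
    project_name='my-dreambooth-project',
    token='hf_xxx',
    local=True,
)
data_path = preprocessor.prepare()  # -> 'my-dreambooth-project/autotrain-data'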
# File: autotrain-advanced-main/src/autotrain/preprocessor/tabular.py
from dataclasses import dataclass
from typing import List, Optional
import pandas as pd
from datasets import Dataset, DatasetDict
from sklearn.model_selection import train_test_split
RESERVED_COLUMNS = ['autotrain_id', 'autotrain_label']
@dataclass
class TabularBinaryClassificationPreprocessor:
train_data: pd.DataFrame
label_column: str
username: str
project_name: str
token: str
id_column: Optional[str] = None
valid_data: Optional[pd.DataFrame] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
local: Optional[bool] = False
def __post_init__(self):
if self.id_column is not None:
if self.id_column not in self.train_data.columns:
raise ValueError(f'{self.id_column} not in train data')
if self.label_column not in self.train_data.columns:
raise ValueError(f'{self.label_column} not in train data')
if self.valid_data is not None:
if self.id_column is not None:
if self.id_column not in self.valid_data.columns:
raise ValueError(f'{self.id_column} not in valid data')
if self.label_column not in self.valid_data.columns:
raise ValueError(f'{self.label_column} not in valid data')
for column in RESERVED_COLUMNS:
if column in self.train_data.columns:
raise ValueError(f'{column} is a reserved column name')
if self.valid_data is not None:
if column in self.valid_data.columns:
raise ValueError(f'{column} is a reserved column name')
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
else:
(train_df, valid_df) = train_test_split(self.train_data, test_size=self.test_size, random_state=self.seed, stratify=self.train_data[self.label_column])
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare_columns(self, train_df, valid_df):
train_df.loc[:, 'autotrain_id'] = train_df[self.id_column] if self.id_column else list(range(len(train_df)))
train_df.loc[:, 'autotrain_label'] = train_df[self.label_column]
valid_df.loc[:, 'autotrain_id'] = valid_df[self.id_column] if self.id_column else list(range(len(valid_df)))
valid_df.loc[:, 'autotrain_label'] = valid_df[self.label_column]
drop_cols = [self.id_column, self.label_column] if self.id_column else [self.label_column]
train_df = train_df.drop(columns=drop_cols)
valid_df = valid_df.drop(columns=drop_cols)
return (train_df, valid_df)
def prepare(self):
(train_df, valid_df) = self.split()
(train_df, valid_df) = self.prepare_columns(train_df, valid_df)
train_df = Dataset.from_pandas(train_df)
valid_df = Dataset.from_pandas(valid_df)
if self.local:
dataset = DatasetDict({'train': train_df, 'validation': valid_df})
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
train_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='train', private=True, token=self.token)
valid_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='validation', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
class TabularMultiClassClassificationPreprocessor(TabularBinaryClassificationPreprocessor):
pass
class TabularSingleColumnRegressionPreprocessor(TabularBinaryClassificationPreprocessor):
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
else:
(train_df, valid_df) = train_test_split(self.train_data, test_size=self.test_size, random_state=self.seed)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
@dataclass
class TabularMultiLabelClassificationPreprocessor:
train_data: pd.DataFrame
label_column: List[str]
username: str
project_name: str
id_column: Optional[str] = None
valid_data: Optional[pd.DataFrame] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
token: Optional[str] = None
local: Optional[bool] = False
def __post_init__(self):
if self.id_column is not None:
if self.id_column not in self.train_data.columns:
raise ValueError(f'{self.id_column} not in train data')
for label in self.label_column:
if label not in self.train_data.columns:
raise ValueError(f'{label} not in train data')
if self.valid_data is not None:
if self.id_column is not None:
if self.id_column not in self.valid_data.columns:
raise ValueError(f'{self.id_column} not in valid data')
for label in self.label_column:
if label not in self.valid_data.columns:
raise ValueError(f'{label} not in valid data')
for column in RESERVED_COLUMNS:
if column in self.train_data.columns:
raise ValueError(f'{column} is a reserved column name')
if self.valid_data is not None:
if column in self.valid_data.columns:
raise ValueError(f'{column} is a reserved column name')
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
else:
(train_df, valid_df) = train_test_split(self.train_data, test_size=self.test_size, random_state=self.seed, stratify=self.train_data[self.label_column])
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare_columns(self, train_df, valid_df):
train_df.loc[:, 'autotrain_id'] = train_df[self.id_column] if self.id_column else list(range(len(train_df)))
for label in range(len(self.label_column)):
train_df.loc[:, f'autotrain_label_{label}'] = train_df[self.label_column[label]]
valid_df.loc[:, 'autotrain_id'] = valid_df[self.id_column] if self.id_column else list(range(len(valid_df)))
for label in range(len(self.label_column)):
valid_df.loc[:, f'autotrain_label_{label}'] = valid_df[self.label_column[label]]
drop_cols = [self.id_column] + self.label_column if self.id_column else self.label_column
train_df = train_df.drop(columns=drop_cols)
valid_df = valid_df.drop(columns=drop_cols)
return (train_df, valid_df)
def prepare(self):
(train_df, valid_df) = self.split()
(train_df, valid_df) = self.prepare_columns(train_df, valid_df)
train_df = Dataset.from_pandas(train_df)
valid_df = Dataset.from_pandas(valid_df)
if self.local:
dataset = DatasetDict({'train': train_df, 'validation': valid_df})
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
train_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='train', private=True, token=self.token)
valid_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='validation', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
class TabularMultiColumnRegressionPreprocessor(TabularMultiLabelClassificationPreprocessor):
pass
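# Usage sketch (illustrative, not part of the original module): the binary
# classification preprocessor adds autotrain_id / autotrain_label columns, carves a
# stratified validation split when none is supplied, and returns a local path (or a
# Hub dataset id when local=False); the other tabular variants above follow the same
# pattern. The dataframe and credentials below are made up.
df = pd.DataFrame({
    'feat1': [1, 2, 3, 4, 5, 6, 7, 8],
    'feat2': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
    'label': [0, 1, 0, 1, 0, 1, 0, 1],
})
prep = TabularBinaryClassificationPreprocessor(
    train_data=df,
    label_column='label',
    username='my-username',
    project_name='my-tabular-project',
    token='hf_xxx',
    local=True,
)
data_path = prep.prepare()  # -> 'my-tabular-project/autotrain-data'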
# File: autotrain-advanced-main/src/autotrain/preprocessor/text.py
import ast
from dataclasses import dataclass
from typing import Optional
import pandas as pd
from datasets import ClassLabel, Dataset, DatasetDict, Sequence
from sklearn.model_selection import train_test_split
from autotrain import logger
RESERVED_COLUMNS = ['autotrain_text', 'autotrain_label', 'autotrain_question', 'autotrain_answer']
LLM_RESERVED_COLUMNS = ['autotrain_prompt', 'autotrain_context', 'autotrain_rejected_text', 'autotrain_prompt_start']
@dataclass
class TextBinaryClassificationPreprocessor:
train_data: pd.DataFrame
text_column: str
label_column: str
username: str
project_name: str
token: str
valid_data: Optional[pd.DataFrame] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
convert_to_class_label: Optional[bool] = False
local: Optional[bool] = False
def __post_init__(self):
if self.text_column not in self.train_data.columns:
raise ValueError(f'{self.text_column} not in train data')
if self.label_column not in self.train_data.columns:
raise ValueError(f'{self.label_column} not in train data')
if self.valid_data is not None:
if self.text_column not in self.valid_data.columns:
raise ValueError(f'{self.text_column} not in valid data')
if self.label_column not in self.valid_data.columns:
raise ValueError(f'{self.label_column} not in valid data')
for column in RESERVED_COLUMNS:
if column in self.train_data.columns:
raise ValueError(f'{column} is a reserved column name')
if self.valid_data is not None:
if column in self.valid_data.columns:
raise ValueError(f'{column} is a reserved column name')
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
else:
(train_df, valid_df) = train_test_split(self.train_data, test_size=self.test_size, random_state=self.seed, stratify=self.train_data[self.label_column])
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare_columns(self, train_df, valid_df):
train_df.loc[:, 'autotrain_text'] = train_df[self.text_column]
train_df.loc[:, 'autotrain_label'] = train_df[self.label_column]
valid_df.loc[:, 'autotrain_text'] = valid_df[self.text_column]
valid_df.loc[:, 'autotrain_label'] = valid_df[self.label_column]
train_df = train_df.drop(columns=[self.text_column, self.label_column])
valid_df = valid_df.drop(columns=[self.text_column, self.label_column])
return (train_df, valid_df)
def prepare(self):
(train_df, valid_df) = self.split()
(train_df, valid_df) = self.prepare_columns(train_df, valid_df)
train_df.loc[:, 'autotrain_label'] = train_df['autotrain_label'].astype(str)
valid_df.loc[:, 'autotrain_label'] = valid_df['autotrain_label'].astype(str)
label_names = sorted(set(train_df['autotrain_label'].unique().tolist()))
train_df = Dataset.from_pandas(train_df)
valid_df = Dataset.from_pandas(valid_df)
if self.convert_to_class_label:
train_df = train_df.cast_column('autotrain_label', ClassLabel(names=label_names))
valid_df = valid_df.cast_column('autotrain_label', ClassLabel(names=label_names))
if self.local:
dataset = DatasetDict({'train': train_df, 'validation': valid_df})
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
train_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='train', private=True, token=self.token)
valid_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='validation', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
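# Usage sketch (illustrative, not part of the original module): the text and label
# columns are copied into autotrain_text / autotrain_label, and
# convert_to_class_label=True casts the label into a datasets.ClassLabel feature; the
# subclasses defined next reuse the same interface. All values below are made up.
df = pd.DataFrame({'text': ['good movie', 'bad movie'] * 4, 'label': ['pos', 'neg'] * 4})
prep = TextBinaryClassificationPreprocessor(
    train_data=df,
    text_column='text',
    label_column='label',
    username='my-username',
    project_name='my-text-project',
    token='hf_xxx',
    convert_to_class_label=True,
    local=True,
)
data_path = prep.prepare()  # -> 'my-text-project/autotrain-data'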
class TextMultiClassClassificationPreprocessor(TextBinaryClassificationPreprocessor):
pass
class TextSingleColumnRegressionPreprocessor(TextBinaryClassificationPreprocessor):
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
else:
(train_df, valid_df) = train_test_split(self.train_data, test_size=self.test_size, random_state=self.seed)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare(self):
(train_df, valid_df) = self.split()
(train_df, valid_df) = self.prepare_columns(train_df, valid_df)
train_df = Dataset.from_pandas(train_df)
valid_df = Dataset.from_pandas(valid_df)
if self.local:
dataset = DatasetDict({'train': train_df, 'validation': valid_df})
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
train_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='train', private=True, token=self.token)
valid_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='validation', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
class TextTokenClassificationPreprocessor(TextBinaryClassificationPreprocessor):
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
else:
(train_df, valid_df) = train_test_split(self.train_data, test_size=self.test_size, random_state=self.seed)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare(self):
(train_df, valid_df) = self.split()
(train_df, valid_df) = self.prepare_columns(train_df, valid_df)
try:
train_df.loc[:, 'autotrain_text'] = train_df['autotrain_text'].apply(lambda x: ast.literal_eval(x))
valid_df.loc[:, 'autotrain_text'] = valid_df['autotrain_text'].apply(lambda x: ast.literal_eval(x))
except ValueError:
logger.warning("Unable to do ast.literal_eval on train_df['autotrain_text']")
logger.warning('assuming autotrain_text is already a list')
try:
train_df.loc[:, 'autotrain_label'] = train_df['autotrain_label'].apply(lambda x: ast.literal_eval(x))
valid_df.loc[:, 'autotrain_label'] = valid_df['autotrain_label'].apply(lambda x: ast.literal_eval(x))
except ValueError:
logger.warning("Unable to do ast.literal_eval on train_df['autotrain_label']")
logger.warning('assuming autotrain_label is already a list')
label_names_train = sorted(set(train_df['autotrain_label'].explode().unique().tolist()))
label_names_valid = sorted(set(valid_df['autotrain_label'].explode().unique().tolist()))
label_names = sorted(set(label_names_train + label_names_valid))
train_df = Dataset.from_pandas(train_df)
valid_df = Dataset.from_pandas(valid_df)
if self.convert_to_class_label:
train_df = train_df.cast_column('autotrain_label', Sequence(ClassLabel(names=label_names)))
valid_df = valid_df.cast_column('autotrain_label', Sequence(ClassLabel(names=label_names)))
if self.local:
dataset = DatasetDict({'train': train_df, 'validation': valid_df})
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
train_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='train', private=True, token=self.token)
valid_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='validation', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
@dataclass
class LLMPreprocessor:
train_data: pd.DataFrame
username: str
project_name: str
token: str
valid_data: Optional[pd.DataFrame] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
text_column: Optional[str] = None
prompt_column: Optional[str] = None
rejected_text_column: Optional[str] = None
local: Optional[bool] = False
def __post_init__(self):
if self.text_column is None:
raise ValueError('text_column must be provided')
if self.prompt_column is not None and self.prompt_column not in self.train_data.columns:
self.prompt_column = None
if self.rejected_text_column is not None and self.rejected_text_column not in self.train_data.columns:
self.rejected_text_column = None
for column in RESERVED_COLUMNS + LLM_RESERVED_COLUMNS:
if column in self.train_data.columns:
raise ValueError(f'{column} is a reserved column name')
if self.valid_data is not None:
if column in self.valid_data.columns:
raise ValueError(f'{column} is a reserved column name')
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
return (self.train_data, self.train_data)
def prepare_columns(self, train_df, valid_df):
drop_cols = [self.text_column]
train_df.loc[:, 'autotrain_text'] = train_df[self.text_column]
valid_df.loc[:, 'autotrain_text'] = valid_df[self.text_column]
if self.prompt_column is not None:
drop_cols.append(self.prompt_column)
train_df.loc[:, 'autotrain_prompt'] = train_df[self.prompt_column]
valid_df.loc[:, 'autotrain_prompt'] = valid_df[self.prompt_column]
if self.rejected_text_column is not None:
drop_cols.append(self.rejected_text_column)
train_df.loc[:, 'autotrain_rejected_text'] = train_df[self.rejected_text_column]
valid_df.loc[:, 'autotrain_rejected_text'] = valid_df[self.rejected_text_column]
train_df = train_df.drop(columns=drop_cols)
valid_df = valid_df.drop(columns=drop_cols)
return (train_df, valid_df)
def prepare(self):
(train_df, valid_df) = self.split()
(train_df, valid_df) = self.prepare_columns(train_df, valid_df)
train_df = Dataset.from_pandas(train_df)
valid_df = Dataset.from_pandas(valid_df)
if self.local:
dataset = DatasetDict({'train': train_df, 'validation': valid_df})
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
train_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='train', private=True, token=self.token)
valid_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='validation', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
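# Usage sketch (illustrative, not part of the original module): plain language-model
# / SFT data only needs text_column; prompt_column and rejected_text_column are picked
# up when present (DPO/ORPO-style data). With no validation frame the train split is
# simply reused as validation. The texts and token below are made up.
df = pd.DataFrame({'text': ['### Human: hi\n### Assistant: hello', '### Human: bye\n### Assistant: goodbye']})
prep = LLMPreprocessor(
    train_data=df,
    username='my-username',
    project_name='my-llm-project',
    token='hf_xxx',
    text_column='text',
    local=True,
)
data_path = prep.prepare()  # -> 'my-llm-project/autotrain-data'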
@dataclass
class Seq2SeqPreprocessor:
train_data: pd.DataFrame
text_column: str
label_column: str
username: str
project_name: str
token: str
valid_data: Optional[pd.DataFrame] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
local: Optional[bool] = False
def __post_init__(self):
if self.text_column not in self.train_data.columns:
raise ValueError(f'{self.text_column} not in train data')
if self.label_column not in self.train_data.columns:
raise ValueError(f'{self.label_column} not in train data')
if self.valid_data is not None:
if self.text_column not in self.valid_data.columns:
raise ValueError(f'{self.text_column} not in valid data')
if self.label_column not in self.valid_data.columns:
raise ValueError(f'{self.label_column} not in valid data')
for column in RESERVED_COLUMNS:
if column in self.train_data.columns:
raise ValueError(f'{column} is a reserved column name')
if self.valid_data is not None:
if column in self.valid_data.columns:
raise ValueError(f'{column} is a reserved column name')
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
else:
(train_df, valid_df) = train_test_split(self.train_data, test_size=self.test_size, random_state=self.seed)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare_columns(self, train_df, valid_df):
train_df.loc[:, 'autotrain_text'] = train_df[self.text_column]
train_df.loc[:, 'autotrain_label'] = train_df[self.label_column]
valid_df.loc[:, 'autotrain_text'] = valid_df[self.text_column]
valid_df.loc[:, 'autotrain_label'] = valid_df[self.label_column]
train_df = train_df.drop(columns=[self.text_column, self.label_column])
valid_df = valid_df.drop(columns=[self.text_column, self.label_column])
return (train_df, valid_df)
def prepare(self):
(train_df, valid_df) = self.split()
(train_df, valid_df) = self.prepare_columns(train_df, valid_df)
train_df = Dataset.from_pandas(train_df)
valid_df = Dataset.from_pandas(valid_df)
if self.local:
dataset = DatasetDict({'train': train_df, 'validation': valid_df})
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
train_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='train', private=True, token=self.token)
valid_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='validation', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
@dataclass
class SentenceTransformersPreprocessor:
train_data: pd.DataFrame
username: str
project_name: str
token: str
valid_data: Optional[pd.DataFrame] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
local: Optional[bool] = False
sentence1_column: Optional[str] = 'sentence1'
sentence2_column: Optional[str] = 'sentence2'
sentence3_column: Optional[str] = 'sentence3'
target_column: Optional[str] = 'target'
convert_to_class_label: Optional[bool] = False
def __post_init__(self):
for column in RESERVED_COLUMNS + LLM_RESERVED_COLUMNS:
if column in self.train_data.columns:
raise ValueError(f'{column} is a reserved column name')
if self.valid_data is not None:
if column in self.valid_data.columns:
raise ValueError(f'{column} is a reserved column name')
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
else:
(train_df, valid_df) = train_test_split(self.train_data, test_size=self.test_size, random_state=self.seed)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare_columns(self, train_df, valid_df):
train_df.loc[:, 'autotrain_sentence1'] = train_df[self.sentence1_column]
train_df.loc[:, 'autotrain_sentence2'] = train_df[self.sentence2_column]
valid_df.loc[:, 'autotrain_sentence1'] = valid_df[self.sentence1_column]
valid_df.loc[:, 'autotrain_sentence2'] = valid_df[self.sentence2_column]
keep_cols = ['autotrain_sentence1', 'autotrain_sentence2']
if self.sentence3_column is not None:
train_df.loc[:, 'autotrain_sentence3'] = train_df[self.sentence3_column]
valid_df.loc[:, 'autotrain_sentence3'] = valid_df[self.sentence3_column]
keep_cols.append('autotrain_sentence3')
if self.target_column is not None:
train_df.loc[:, 'autotrain_target'] = train_df[self.target_column]
valid_df.loc[:, 'autotrain_target'] = valid_df[self.target_column]
keep_cols.append('autotrain_target')
train_df = train_df[keep_cols]
valid_df = valid_df[keep_cols]
return (train_df, valid_df)
def prepare(self):
(train_df, valid_df) = self.split()
(train_df, valid_df) = self.prepare_columns(train_df, valid_df)
if self.convert_to_class_label:
label_names = sorted(set(train_df['autotrain_target'].unique().tolist()))
train_df = Dataset.from_pandas(train_df)
valid_df = Dataset.from_pandas(valid_df)
if self.convert_to_class_label:
train_df = train_df.cast_column('autotrain_target', ClassLabel(names=label_names))
valid_df = valid_df.cast_column('autotrain_target', ClassLabel(names=label_names))
if self.local:
dataset = DatasetDict({'train': train_df, 'validation': valid_df})
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
train_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='train', private=True, token=self.token)
valid_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='validation', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
@dataclass
class TextExtractiveQuestionAnsweringPreprocessor:
train_data: pd.DataFrame
text_column: str
question_column: str
answer_column: str
username: str
project_name: str
token: str
valid_data: Optional[pd.DataFrame] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
local: Optional[bool] = False
def __post_init__(self):
if self.text_column not in self.train_data.columns:
raise ValueError(f'{self.text_column} not in train data')
if self.question_column not in self.train_data.columns:
raise ValueError(f'{self.question_column} not in train data')
if self.answer_column not in self.train_data.columns:
raise ValueError(f'{self.answer_column} not in train data')
if self.valid_data is not None:
if self.text_column not in self.valid_data.columns:
raise ValueError(f'{self.text_column} not in valid data')
if self.question_column not in self.valid_data.columns:
raise ValueError(f'{self.question_column} not in valid data')
if self.answer_column not in self.valid_data.columns:
raise ValueError(f'{self.answer_column} not in valid data')
for column in RESERVED_COLUMNS:
if column in self.train_data.columns:
raise ValueError(f'{column} is a reserved column name')
if self.valid_data is not None:
if column in self.valid_data.columns:
raise ValueError(f'{column} is a reserved column name')
try:
self.train_data.loc[:, self.answer_column] = self.train_data[self.answer_column].apply(lambda x: ast.literal_eval(x))
except ValueError:
logger.warning('Unable to do ast.literal_eval on train_data[answer_column]')
logger.warning('assuming answer_column is already a dict')
if self.valid_data is not None:
try:
self.valid_data.loc[:, self.answer_column] = self.valid_data[self.answer_column].apply(lambda x: ast.literal_eval(x))
except ValueError:
logger.warning('Unable to do ast.literal_eval on valid_data[answer_column]')
logger.warning('assuming answer_column is already a dict')
def split(self):
if self.valid_data is not None:
return (self.train_data, self.valid_data)
else:
(train_df, valid_df) = train_test_split(self.train_data, test_size=self.test_size, random_state=self.seed)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare_columns(self, train_df, valid_df):
train_df.loc[:, 'autotrain_text'] = train_df[self.text_column]
train_df.loc[:, 'autotrain_question'] = train_df[self.question_column]
train_df.loc[:, 'autotrain_answer'] = train_df[self.answer_column]
valid_df.loc[:, 'autotrain_text'] = valid_df[self.text_column]
valid_df.loc[:, 'autotrain_question'] = valid_df[self.question_column]
valid_df.loc[:, 'autotrain_answer'] = valid_df[self.answer_column]
train_df = train_df.drop(columns=[x for x in train_df.columns if x not in ['autotrain_text', 'autotrain_question', 'autotrain_answer']])
valid_df = valid_df.drop(columns=[x for x in valid_df.columns if x not in ['autotrain_text', 'autotrain_question', 'autotrain_answer']])
return (train_df, valid_df)
def prepare(self):
(train_df, valid_df) = self.split()
(train_df, valid_df) = self.prepare_columns(train_df, valid_df)
train_df = Dataset.from_pandas(train_df)
valid_df = Dataset.from_pandas(valid_df)
if self.local:
dataset = DatasetDict({'train': train_df, 'validation': valid_df})
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
train_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='train', private=True, token=self.token)
valid_df.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', split='validation', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
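# Usage sketch (illustrative, not part of the original module): the answer column may
# hold dicts or their string representation (the ast.literal_eval fallback above
# handles both). The SQuAD-style {'text': [...], 'answer_start': [...]} shape shown
# here is an assumption based on that convention, not something this file enforces.
df = pd.DataFrame({
    'context': ['AutoTrain is a library for training models.'] * 2,
    'question': ['What is AutoTrain?'] * 2,
    'answers': ["{'text': ['a library for training models'], 'answer_start': [13]}"] * 2,
})
prep = TextExtractiveQuestionAnsweringPreprocessor(
    train_data=df,
    text_column='context',
    question_column='question',
    answer_column='answers',
    username='my-username',
    project_name='my-qa-project',
    token='hf_xxx',
    local=True,
)
data_path = prep.prepare()  # -> 'my-qa-project/autotrain-data'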
# File: autotrain-advanced-main/src/autotrain/preprocessor/vision.py
import os
import shutil
import uuid
from dataclasses import dataclass
from typing import Optional
import pandas as pd
from datasets import ClassLabel, Features, Image, Sequence, Value, load_dataset
from sklearn.model_selection import train_test_split
ALLOWED_EXTENSIONS = ('jpeg', 'png', 'jpg', 'JPG', 'JPEG', 'PNG')
@dataclass
class ImageClassificationPreprocessor:
train_data: str
username: str
project_name: str
token: str
valid_data: Optional[str] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
local: Optional[bool] = False
def __post_init__(self):
if not os.path.exists(self.train_data):
raise ValueError(f'{self.train_data} does not exist.')
subfolders = [f.path for f in os.scandir(self.train_data) if f.is_dir()]
if len(subfolders) < 2:
raise ValueError(f'{self.train_data} should contain at least 2 subfolders.')
for subfolder in subfolders:
image_files = [f for f in os.listdir(subfolder) if f.endswith(ALLOWED_EXTENSIONS)]
if len(image_files) < 5:
raise ValueError(f'{subfolder} should contain at least 5 jpeg, png or jpg files.')
if len(image_files) != len(os.listdir(subfolder)):
raise ValueError(f'{subfolder} should not contain any other files except image files.')
subfolders_in_subfolder = [f.path for f in os.scandir(subfolder) if f.is_dir()]
if len(subfolders_in_subfolder) > 0:
raise ValueError(f'{subfolder} should not contain any subfolders.')
if self.valid_data:
if not os.path.exists(self.valid_data):
raise ValueError(f'{self.valid_data} does not exist.')
subfolders = [f.path for f in os.scandir(self.valid_data) if f.is_dir()]
train_subfolders = set((os.path.basename(f.path) for f in os.scandir(self.train_data) if f.is_dir()))
valid_subfolders = set((os.path.basename(f.path) for f in os.scandir(self.valid_data) if f.is_dir()))
if train_subfolders != valid_subfolders:
raise ValueError(f'{self.valid_data} should have the same subfolders as {self.train_data}.')
if len(subfolders) < 2:
raise ValueError(f'{self.valid_data} should contain at least 2 subfolders.')
for subfolder in subfolders:
image_files = [f for f in os.listdir(subfolder) if f.endswith(ALLOWED_EXTENSIONS)]
if len(image_files) < 5:
raise ValueError(f'{subfolder} should contain at least 5 jpeg, png or jpg files.')
if len(image_files) != len(os.listdir(subfolder)):
raise ValueError(f'{subfolder} should not contain any other files except image files.')
subfolders_in_subfolder = [f.path for f in os.scandir(subfolder) if f.is_dir()]
if len(subfolders_in_subfolder) > 0:
raise ValueError(f'{subfolder} should not contain any subfolders.')
def split(self, df):
(train_df, valid_df) = train_test_split(df, test_size=self.test_size, random_state=self.seed, stratify=df['subfolder'])
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare(self):
random_uuid = uuid.uuid4()
cache_dir = os.environ.get('HF_HOME')
if not cache_dir:
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface')
data_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
if self.valid_data:
shutil.copytree(self.train_data, os.path.join(data_dir, 'train'))
shutil.copytree(self.valid_data, os.path.join(data_dir, 'validation'))
dataset = load_dataset('imagefolder', data_dir=data_dir)
dataset = dataset.rename_columns({'image': 'autotrain_image', 'label': 'autotrain_label'})
if self.local:
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
dataset.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', private=True, token=self.token)
else:
subfolders = [f.path for f in os.scandir(self.train_data) if f.is_dir()]
image_filenames = []
subfolder_names = []
for subfolder in subfolders:
for filename in os.listdir(subfolder):
if filename.endswith(ALLOWED_EXTENSIONS):  # match the extensions accepted in __post_init__, including upper-case variants
image_filenames.append(filename)
subfolder_names.append(os.path.basename(subfolder))
df = pd.DataFrame({'image_filename': image_filenames, 'subfolder': subfolder_names})
(train_df, valid_df) = self.split(df)
for row in train_df.itertuples():
os.makedirs(os.path.join(data_dir, 'train', row.subfolder), exist_ok=True)
shutil.copy(os.path.join(self.train_data, row.subfolder, row.image_filename), os.path.join(data_dir, 'train', row.subfolder, row.image_filename))
for row in valid_df.itertuples():
os.makedirs(os.path.join(data_dir, 'validation', row.subfolder), exist_ok=True)
shutil.copy(os.path.join(self.train_data, row.subfolder, row.image_filename), os.path.join(data_dir, 'validation', row.subfolder, row.image_filename))
dataset = load_dataset('imagefolder', data_dir=data_dir)
dataset = dataset.rename_columns({'image': 'autotrain_image', 'label': 'autotrain_label'})
if self.local:
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
dataset.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
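# Usage sketch (illustrative, not part of the original module): train_data must be an
# imagefolder-style directory with one sub-folder per class, each holding at least
# five images and nothing else, e.g.
#   images/train/cats/0.jpg, images/train/dogs/0.jpg, ...
# The paths and token below are made up; omit valid_data to let split() carve out a
# stratified validation set instead.
prep = ImageClassificationPreprocessor(
    train_data='images/train',
    valid_data='images/valid',
    username='my-username',
    project_name='my-image-project',
    token='hf_xxx',
    local=True,
)
data_path = prep.prepare()  # -> 'my-image-project/autotrain-data'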
@dataclass
class ObjectDetectionPreprocessor:
train_data: str
username: str
project_name: str
token: str
valid_data: Optional[str] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
local: Optional[bool] = False
@staticmethod
def _process_metadata(data_path):
metadata = pd.read_json(os.path.join(data_path, 'metadata.jsonl'), lines=True)
if 'file_name' not in metadata.columns or 'objects' not in metadata.columns:
raise ValueError(f"{data_path}/metadata.jsonl should contain 'file_name' and 'objects' columns.")
metadata = metadata[['file_name', 'objects']]
categories = []
for (_, row) in metadata.iterrows():
obj = row['objects']
if 'bbox' not in obj or 'category' not in obj:
raise ValueError(f"{data_path}/metadata.jsonl should contain 'bbox' and 'category' keys in 'objects'.")
obj = {k: obj[k] for k in ['bbox', 'category']}
categories.extend(obj['category'])
categories = set(categories)
return (metadata, categories)
def __post_init__(self):
if not os.path.exists(self.train_data):
raise ValueError(f'{self.train_data} does not exist.')
train_image_files = [f for f in os.listdir(self.train_data) if f.endswith(ALLOWED_EXTENSIONS)]
if len(train_image_files) < 5:
raise ValueError(f'{self.train_data} should contain at least 5 jpeg, png or jpg files.')
if 'metadata.jsonl' not in os.listdir(self.train_data):
raise ValueError(f'{self.train_data} should contain a metadata.jsonl file.')
if self.valid_data:
if not os.path.exists(self.valid_data):
raise ValueError(f'{self.valid_data} does not exist.')
valid_image_files = [f for f in os.listdir(self.valid_data) if f.endswith(ALLOWED_EXTENSIONS)]
if len(valid_image_files) < 5:
raise ValueError(f'{self.valid_data} should contain at least 5 jpeg, png or jpg files.')
if 'metadata.jsonl' not in os.listdir(self.valid_data):
raise ValueError(f'{self.valid_data} should contain a metadata.jsonl file.')
def split(self, df):
(train_df, valid_df) = train_test_split(df, test_size=self.test_size, random_state=self.seed)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare(self):
random_uuid = uuid.uuid4()
cache_dir = os.environ.get('HF_HOME')
if not cache_dir:
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface')
data_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
if self.valid_data:
shutil.copytree(self.train_data, os.path.join(data_dir, 'train'))
shutil.copytree(self.valid_data, os.path.join(data_dir, 'validation'))
(train_metadata, train_categories) = self._process_metadata(os.path.join(data_dir, 'train'))
(valid_metadata, valid_categories) = self._process_metadata(os.path.join(data_dir, 'validation'))
train_metadata.to_json(os.path.join(data_dir, 'train', 'metadata.jsonl'), orient='records', lines=True)
valid_metadata.to_json(os.path.join(data_dir, 'validation', 'metadata.jsonl'), orient='records', lines=True)
all_categories = train_categories.union(valid_categories)
features = Features({'image': Image(), 'objects': Sequence({'bbox': Sequence(Value('float32'), length=4), 'category': ClassLabel(names=list(all_categories))})})
dataset = load_dataset('imagefolder', data_dir=data_dir, features=features)
dataset = dataset.rename_columns({'image': 'autotrain_image', 'objects': 'autotrain_objects'})
if self.local:
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
dataset.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', private=True, token=self.token)
else:
metadata = pd.read_json(os.path.join(self.train_data, 'metadata.jsonl'), lines=True)
(train_df, valid_df) = self.split(metadata)
os.makedirs(os.path.join(data_dir, 'train'), exist_ok=True)
os.makedirs(os.path.join(data_dir, 'validation'), exist_ok=True)
for row in train_df.iterrows():
shutil.copy(os.path.join(self.train_data, row[1]['file_name']), os.path.join(data_dir, 'train', row[1]['file_name']))
for row in valid_df.iterrows():
shutil.copy(os.path.join(self.train_data, row[1]['file_name']), os.path.join(data_dir, 'validation', row[1]['file_name']))
train_df.to_json(os.path.join(data_dir, 'train', 'metadata.jsonl'), orient='records', lines=True)
valid_df.to_json(os.path.join(data_dir, 'validation', 'metadata.jsonl'), orient='records', lines=True)
(train_metadata, train_categories) = self._process_metadata(os.path.join(data_dir, 'train'))
(valid_metadata, valid_categories) = self._process_metadata(os.path.join(data_dir, 'validation'))
train_metadata.to_json(os.path.join(data_dir, 'train', 'metadata.jsonl'), orient='records', lines=True)
valid_metadata.to_json(os.path.join(data_dir, 'validation', 'metadata.jsonl'), orient='records', lines=True)
all_categories = train_categories.union(valid_categories)
features = Features({'image': Image(), 'objects': Sequence({'bbox': Sequence(Value('float32'), length=4), 'category': ClassLabel(names=list(all_categories))})})
dataset = load_dataset('imagefolder', data_dir=data_dir, features=features)
dataset = dataset.rename_columns({'image': 'autotrain_image', 'objects': 'autotrain_objects'})
if self.local:
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
dataset.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
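# Data-layout sketch (illustrative, not part of the original module): the train folder
# holds the images plus a metadata.jsonl whose lines pair a file with its annotations,
# e.g. (values made up; the exact bbox convention is an assumption of this sketch
# rather than something enforced above):
#   {"file_name": "0001.jpg", "objects": {"bbox": [[10.0, 20.0, 50.0, 80.0]], "category": ["cat"]}}
prep = ObjectDetectionPreprocessor(
    train_data='detection/train',
    username='my-username',
    project_name='my-detection-project',
    token='hf_xxx',
    local=True,
)
data_path = prep.prepare()  # -> 'my-detection-project/autotrain-data'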
@dataclass
class ImageRegressionPreprocessor:
train_data: str
username: str
project_name: str
token: str
valid_data: Optional[str] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
local: Optional[bool] = False
@staticmethod
def _process_metadata(data_path):
metadata = pd.read_json(os.path.join(data_path, 'metadata.jsonl'), lines=True)
if 'file_name' not in metadata.columns or 'target' not in metadata.columns:
raise ValueError(f"{data_path}/metadata.jsonl should contain 'file_name' and 'target' columns.")
metadata = metadata[['file_name', 'target']]
return metadata
def __post_init__(self):
if not os.path.exists(self.train_data):
raise ValueError(f'{self.train_data} does not exist.')
train_image_files = [f for f in os.listdir(self.train_data) if f.endswith(ALLOWED_EXTENSIONS)]
if len(train_image_files) < 5:
raise ValueError(f'{self.train_data} should contain at least 5 jpeg, png or jpg files.')
if 'metadata.jsonl' not in os.listdir(self.train_data):
raise ValueError(f'{self.train_data} should contain a metadata.jsonl file.')
if self.valid_data:
if not os.path.exists(self.valid_data):
raise ValueError(f'{self.valid_data} does not exist.')
valid_image_files = [f for f in os.listdir(self.valid_data) if f.endswith(ALLOWED_EXTENSIONS)]
if len(valid_image_files) < 5:
raise ValueError(f'{self.valid_data} should contain at least 5 jpeg, png or jpg files.')
if 'metadata.jsonl' not in os.listdir(self.valid_data):
raise ValueError(f'{self.valid_data} should contain a metadata.jsonl file.')
def split(self, df):
(train_df, valid_df) = train_test_split(df, test_size=self.test_size, random_state=self.seed)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare(self):
random_uuid = uuid.uuid4()
cache_dir = os.environ.get('HF_HOME')
if not cache_dir:
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface')
data_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
if self.valid_data:
shutil.copytree(self.train_data, os.path.join(data_dir, 'train'))
shutil.copytree(self.valid_data, os.path.join(data_dir, 'validation'))
train_metadata = self._process_metadata(os.path.join(data_dir, 'train'))
valid_metadata = self._process_metadata(os.path.join(data_dir, 'validation'))
train_metadata.to_json(os.path.join(data_dir, 'train', 'metadata.jsonl'), orient='records', lines=True)
valid_metadata.to_json(os.path.join(data_dir, 'validation', 'metadata.jsonl'), orient='records', lines=True)
dataset = load_dataset('imagefolder', data_dir=data_dir)
dataset = dataset.rename_columns({'image': 'autotrain_image', 'target': 'autotrain_label'})
if self.local:
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
dataset.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', private=True, token=self.token)
else:
metadata = pd.read_json(os.path.join(self.train_data, 'metadata.jsonl'), lines=True)
(train_df, valid_df) = self.split(metadata)
os.makedirs(os.path.join(data_dir, 'train'), exist_ok=True)
os.makedirs(os.path.join(data_dir, 'validation'), exist_ok=True)
for row in train_df.iterrows():
shutil.copy(os.path.join(self.train_data, row[1]['file_name']), os.path.join(data_dir, 'train', row[1]['file_name']))
for row in valid_df.iterrows():
shutil.copy(os.path.join(self.train_data, row[1]['file_name']), os.path.join(data_dir, 'validation', row[1]['file_name']))
train_df.to_json(os.path.join(data_dir, 'train', 'metadata.jsonl'), orient='records', lines=True)
valid_df.to_json(os.path.join(data_dir, 'validation', 'metadata.jsonl'), orient='records', lines=True)
train_metadata = self._process_metadata(os.path.join(data_dir, 'train'))
valid_metadata = self._process_metadata(os.path.join(data_dir, 'validation'))
train_metadata.to_json(os.path.join(data_dir, 'train', 'metadata.jsonl'), orient='records', lines=True)
valid_metadata.to_json(os.path.join(data_dir, 'validation', 'metadata.jsonl'), orient='records', lines=True)
dataset = load_dataset('imagefolder', data_dir=data_dir)
dataset = dataset.rename_columns({'image': 'autotrain_image', 'target': 'autotrain_label'})
if self.local:
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
dataset.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
# File: autotrain-advanced-main/src/autotrain/preprocessor/vlm.py
import os
import shutil
import uuid
from dataclasses import dataclass
from typing import Optional
import pandas as pd
from datasets import Features, Image, Value, load_dataset
from sklearn.model_selection import train_test_split
ALLOWED_EXTENSIONS = ('jpeg', 'png', 'jpg', 'JPG', 'JPEG', 'PNG')
@dataclass
class VLMPreprocessor:
train_data: str
username: str
project_name: str
token: str
column_mapping: dict
valid_data: Optional[str] = None
test_size: Optional[float] = 0.2
seed: Optional[int] = 42
local: Optional[bool] = False
def _process_metadata(self, data_path):
metadata = pd.read_json(os.path.join(data_path, 'metadata.jsonl'), lines=True)
if 'file_name' not in metadata.columns:
raise ValueError(f"{data_path}/metadata.jsonl should contain 'file_name' column.")
col_names = list(self.column_mapping.values())
for col in col_names:
if col not in metadata.columns:
raise ValueError(f"{data_path}/metadata.jsonl should contain '{col}' column.")
return metadata
def __post_init__(self):
if not os.path.exists(self.train_data):
raise ValueError(f'{self.train_data} does not exist.')
train_image_files = [f for f in os.listdir(self.train_data) if f.endswith(ALLOWED_EXTENSIONS)]
if len(train_image_files) < 5:
raise ValueError(f'{self.train_data} should contain at least 5 jpeg, png or jpg files.')
if 'metadata.jsonl' not in os.listdir(self.train_data):
raise ValueError(f'{self.train_data} should contain a metadata.jsonl file.')
if self.valid_data:
if not os.path.exists(self.valid_data):
raise ValueError(f'{self.valid_data} does not exist.')
valid_image_files = [f for f in os.listdir(self.valid_data) if f.endswith(ALLOWED_EXTENSIONS)]
if len(valid_image_files) < 5:
raise ValueError(f'{self.valid_data} should contain at least 5 jpeg, png or jpg files.')
if 'metadata.jsonl' not in os.listdir(self.valid_data):
raise ValueError(f'{self.valid_data} should contain a metadata.jsonl file.')
def split(self, df):
(train_df, valid_df) = train_test_split(df, test_size=self.test_size, random_state=self.seed)
train_df = train_df.reset_index(drop=True)
valid_df = valid_df.reset_index(drop=True)
return (train_df, valid_df)
def prepare(self):
random_uuid = uuid.uuid4()
cache_dir = os.environ.get('HF_HOME')
if not cache_dir:
cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface')
data_dir = os.path.join(cache_dir, 'autotrain', str(random_uuid))
if self.valid_data:
shutil.copytree(self.train_data, os.path.join(data_dir, 'train'))
shutil.copytree(self.valid_data, os.path.join(data_dir, 'validation'))
train_metadata = self._process_metadata(os.path.join(data_dir, 'train'))
valid_metadata = self._process_metadata(os.path.join(data_dir, 'validation'))
train_metadata.to_json(os.path.join(data_dir, 'train', 'metadata.jsonl'), orient='records', lines=True)
valid_metadata.to_json(os.path.join(data_dir, 'validation', 'metadata.jsonl'), orient='records', lines=True)
features = Features({'image': Image()})
for (_, col_map) in self.column_mapping.items():
features[col_map] = Value(dtype='string')
dataset = load_dataset('imagefolder', data_dir=data_dir, features=features)
rename_dict = {'image': 'autotrain_image'}
for (col, col_map) in self.column_mapping.items():
if col == 'text_column':
rename_dict[col_map] = 'autotrain_text'
elif col == 'prompt_text_column':
rename_dict[col_map] = 'autotrain_prompt'
dataset = dataset.rename_columns(rename_dict)
if self.local:
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
dataset.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', private=True, token=self.token)
else:
metadata = pd.read_json(os.path.join(self.train_data, 'metadata.jsonl'), lines=True)
(train_df, valid_df) = self.split(metadata)
os.makedirs(os.path.join(data_dir, 'train'), exist_ok=True)
os.makedirs(os.path.join(data_dir, 'validation'), exist_ok=True)
for row in train_df.iterrows():
shutil.copy(os.path.join(self.train_data, row[1]['file_name']), os.path.join(data_dir, 'train', row[1]['file_name']))
for row in valid_df.iterrows():
shutil.copy(os.path.join(self.train_data, row[1]['file_name']), os.path.join(data_dir, 'validation', row[1]['file_name']))
train_df.to_json(os.path.join(data_dir, 'train', 'metadata.jsonl'), orient='records', lines=True)
valid_df.to_json(os.path.join(data_dir, 'validation', 'metadata.jsonl'), orient='records', lines=True)
train_metadata = self._process_metadata(os.path.join(data_dir, 'train'))
valid_metadata = self._process_metadata(os.path.join(data_dir, 'validation'))
train_metadata.to_json(os.path.join(data_dir, 'train', 'metadata.jsonl'), orient='records', lines=True)
valid_metadata.to_json(os.path.join(data_dir, 'validation', 'metadata.jsonl'), orient='records', lines=True)
features = Features({'image': Image()})
for (_, col_map) in self.column_mapping.items():
features[col_map] = Value(dtype='string')
dataset = load_dataset('imagefolder', data_dir=data_dir, features=features)
rename_dict = {'image': 'autotrain_image'}
for (col, col_map) in self.column_mapping.items():
if col == 'text_column':
rename_dict[col_map] = 'autotrain_text'
elif col == 'prompt_text_column':
rename_dict[col_map] = 'autotrain_prompt'
dataset = dataset.rename_columns(rename_dict)
if self.local:
dataset.save_to_disk(f'{self.project_name}/autotrain-data')
else:
dataset.push_to_hub(f'{self.username}/autotrain-data-{self.project_name}', private=True, token=self.token)
if self.local:
return f'{self.project_name}/autotrain-data'
return f'{self.username}/autotrain-data-{self.project_name}'
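# Usage sketch (illustrative, not part of the original module): column_mapping maps
# the logical VLM columns to the column names found in metadata.jsonl; only
# 'text_column' and 'prompt_text_column' are renamed above. The folder, column names
# and token below are made up.
prep = VLMPreprocessor(
    train_data='vlm/train',  # images + metadata.jsonl with 'file_name', 'caption', 'question'
    username='my-username',
    project_name='my-vlm-project',
    token='hf_xxx',
    column_mapping={'text_column': 'caption', 'prompt_text_column': 'question'},
    local=True,
)
data_path = prep.prepare()  # -> 'my-vlm-project/autotrain-data'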
# File: autotrain-advanced-main/src/autotrain/project.py
""""""
from dataclasses import dataclass
from typing import List, Union
from autotrain.backends.base import AVAILABLE_HARDWARE
from autotrain.backends.endpoints import EndpointsRunner
from autotrain.backends.local import LocalRunner
from autotrain.backends.ngc import NGCRunner
from autotrain.backends.nvcf import NVCFRunner
from autotrain.backends.spaces import SpaceRunner
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.image_regression.params import ImageRegressionParams
from autotrain.trainers.object_detection.params import ObjectDetectionParams
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.trainers.text_regression.params import TextRegressionParams
from autotrain.trainers.token_classification.params import TokenClassificationParams
@dataclass
class AutoTrainProject:
params: Union[List[Union[LLMTrainingParams, TextClassificationParams, TabularParams, DreamBoothTrainingParams, Seq2SeqParams, ImageClassificationParams, TextRegressionParams, ObjectDetectionParams, TokenClassificationParams, SentenceTransformersParams, ImageRegressionParams]], LLMTrainingParams, TextClassificationParams, TabularParams, DreamBoothTrainingParams, Seq2SeqParams, ImageClassificationParams, TextRegressionParams, ObjectDetectionParams, TokenClassificationParams, SentenceTransformersParams, ImageRegressionParams]
backend: str
def __post_init__(self):
if self.backend not in AVAILABLE_HARDWARE:
raise ValueError(f'Invalid backend: {self.backend}')
def create(self):
if self.backend.startswith('local'):
runner = LocalRunner(params=self.params, backend=self.backend)
return runner.create()
elif self.backend.startswith('spaces-'):
runner = SpaceRunner(params=self.params, backend=self.backend)
return runner.create()
elif self.backend.startswith('ep-'):
runner = EndpointsRunner(params=self.params, backend=self.backend)
return runner.create()
elif self.backend.startswith('ngc-'):
runner = NGCRunner(params=self.params, backend=self.backend)
return runner.create()
elif self.backend.startswith('nvcf-'):
runner = NVCFRunner(params=self.params, backend=self.backend)
return runner.create()
else:
raise NotImplementedError
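# Usage sketch (illustrative, not part of the original file): any of the params
# objects above can be handed to AutoTrainProject together with a backend string from
# AVAILABLE_HARDWARE. It is assumed here that 'local' is such a string and that
# TextClassificationParams exposes the same model / data_path / project_name fields as
# the other params classes; the concrete values are made up.
params = TextClassificationParams(
    model='bert-base-uncased',
    data_path='my-username/autotrain-data-my-text-project',
    project_name='my-text-project',
)
project = AutoTrainProject(params=params, backend='local')
job_id = project.create()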
# File: autotrain-advanced-main/src/autotrain/tasks.py
NLP_TASKS = {'text_binary_classification': 1, 'text_multi_class_classification': 2, 'text_token_classification': 4, 'text_extractive_question_answering': 5, 'text_summarization': 8, 'text_single_column_regression': 10, 'speech_recognition': 11, 'natural_language_inference': 22, 'lm_training': 9, 'seq2seq': 28, 'sentence_transformers': 30, 'vlm': 31}
VISION_TASKS = {'image_binary_classification': 17, 'image_multi_class_classification': 18, 'image_single_column_regression': 24, 'image_object_detection': 29, 'dreambooth': 25}
TABULAR_TASKS = {'tabular_binary_classification': 13, 'tabular_multi_class_classification': 14, 'tabular_multi_label_classification': 15, 'tabular_single_column_regression': 16, 'tabular': 26}
TASKS = {**NLP_TASKS, **VISION_TASKS, **TABULAR_TASKS}
# File: autotrain-advanced-main/src/autotrain/tools/convert_to_kohya.py
from diffusers.utils import convert_all_state_dict_to_peft, convert_state_dict_to_kohya
from safetensors.torch import load_file, save_file
from autotrain import logger
def convert_to_kohya(input_path, output_path):
logger.info(f'Converting Lora state dict from {input_path} to Kohya state dict at {output_path}')
lora_state_dict = load_file(input_path)
peft_state_dict = convert_all_state_dict_to_peft(lora_state_dict)
kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict)
save_file(kohya_state_dict, output_path)
logger.info(f'Kohya state dict saved at {output_path}')
# File: autotrain-advanced-main/src/autotrain/tools/merge_adapter.py
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE
def merge_llm_adapter(base_model_path, adapter_path, token, output_folder=None, pad_to_multiple_of=None, push_to_hub=False):
if output_folder is None and push_to_hub is False:
raise ValueError('You must specify either --output_folder or --push_to_hub')
logger.info('Loading adapter...')
base_model = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=ALLOW_REMOTE_CODE, token=token)
tokenizer = AutoTokenizer.from_pretrained(adapter_path, trust_remote_code=ALLOW_REMOTE_CODE, token=token)
if pad_to_multiple_of:
base_model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=pad_to_multiple_of)
else:
base_model.resize_token_embeddings(len(tokenizer))
model = PeftModel.from_pretrained(base_model, adapter_path, token=token)
model = model.merge_and_unload()
if output_folder is not None:
logger.info('Saving target model...')
model.save_pretrained(output_folder)
tokenizer.save_pretrained(output_folder)
logger.info(f'Model saved to {output_folder}')
if push_to_hub:
logger.info('Pushing model to Hugging Face Hub...')
model.push_to_hub(adapter_path)
tokenizer.push_to_hub(adapter_path)
logger.info(f'Model pushed to Hugging Face Hub as {adapter_path}')
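# Usage sketch (illustrative, not part of the original module): merges a LoRA adapter
# back into its fp16 base model and writes the merged weights locally. Model ids,
# paths and token below are made up.
merge_llm_adapter(
    base_model_path='meta-llama/Llama-2-7b-hf',
    adapter_path='my-username/my-llm-project',
    token='hf_xxx',
    output_folder='merged-model',
    push_to_hub=False,
)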
# File: autotrain-advanced-main/src/autotrain/trainers/clm/__main__.py
import argparse
import json
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.common import monitor
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = LLMTrainingParams(**config)
if config.trainer == 'default':
from autotrain.trainers.clm.train_clm_default import train as train_default
train_default(config)
elif config.trainer == 'sft':
from autotrain.trainers.clm.train_clm_sft import train as train_sft
train_sft(config)
elif config.trainer == 'reward':
from autotrain.trainers.clm.train_clm_reward import train as train_reward
train_reward(config)
elif config.trainer == 'dpo':
from autotrain.trainers.clm.train_clm_dpo import train as train_dpo
train_dpo(config)
elif config.trainer == 'orpo':
from autotrain.trainers.clm.train_clm_orpo import train as train_orpo
train_orpo(config)
else:
raise ValueError(f'trainer `{config.trainer}` not supported')
if __name__ == '__main__':
_args = parse_args()
training_config = json.load(open(_args.training_config))
_config = LLMTrainingParams(**training_config)
train(_config)
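# Usage sketch (illustrative): this entry point is typically run as a module with
# a JSON file containing LLMTrainingParams fields (see params.py below), e.g.
#   python -m autotrain.trainers.clm --training_config config.json
# where config.json might hold something like (dataset name is hypothetical):
#   {"model": "gpt2", "project_name": "my-llm-project", "data_path": "my-dataset",
#    "text_column": "text", "trainer": "sft", "epochs": 1, "batch_size": 2, "peft": true}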
# File: autotrain-advanced-main/src/autotrain/trainers/clm/callbacks.py
import os
import torch
from peft import set_peft_model_state_dict
from transformers import TrainerCallback, TrainerControl, TrainerState, TrainingArguments
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
class SavePeftModelCallback(TrainerCallback):
def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
checkpoint_folder = os.path.join(args.output_dir, f'{PREFIX_CHECKPOINT_DIR}-{state.global_step}')
kwargs['model'].save_pretrained(checkpoint_folder)
pytorch_model_path = os.path.join(checkpoint_folder, 'pytorch_model.bin')
torch.save({}, pytorch_model_path)
return control
class LoadBestPeftModelCallback(TrainerCallback):
def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
print(f'Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).')
best_model_path = os.path.join(state.best_model_checkpoint, 'adapter_model.bin')
adapters_weights = torch.load(best_model_path)
model = kwargs['model']
set_peft_model_state_dict(model, adapters_weights)
return control
class SaveDeepSpeedPeftModelCallback(TrainerCallback):
def __init__(self, trainer, save_steps=500):
self.trainer = trainer
self.save_steps = save_steps
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
if (state.global_step + 1) % self.save_steps == 0:
self.trainer.accelerator.wait_for_everyone()
state_dict = self.trainer.accelerator.get_state_dict(self.trainer.deepspeed)
unwrapped_model = self.trainer.accelerator.unwrap_model(self.trainer.deepspeed)
if self.trainer.accelerator.is_main_process:
unwrapped_model.save_pretrained(args.output_dir, state_dict=state_dict)
self.trainer.accelerator.wait_for_everyone()
return control
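# Usage note (descriptive): these callbacks are meant to be passed to a transformers
# Trainer via its `callbacks` argument; get_callbacks() in autotrain.trainers.clm.utils
# does this, adding SavePeftModelCallback when PEFT is enabled without DeepSpeed and
# LoadBestPeftModelCallback when a validation split is configured.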
# File: autotrain-advanced-main/src/autotrain/trainers/clm/params.py
from typing import List, Optional, Union
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class LLMTrainingParams(AutoTrainParams):
model: str = Field('gpt2', title='Model name')
project_name: str = Field('project-name', title='Output directory')
data_path: str = Field('data', title='Data path')
train_split: str = Field('train', title='Train data config')
valid_split: Optional[str] = Field(None, title='Validation data config')
add_eos_token: bool = Field(True, title='Add EOS token')
block_size: Union[int, List[int]] = Field(-1, title='Block size')
model_max_length: int = Field(2048, title='Model max length')
padding: Optional[str] = Field('right', title='Padding side')
trainer: str = Field('default', title='Trainer type')
use_flash_attention_2: bool = Field(False, title='Use flash attention 2')
log: str = Field('none', title='Logging using experiment tracking')
disable_gradient_checkpointing: bool = Field(False, title='Gradient checkpointing')
logging_steps: int = Field(-1, title='Logging steps')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
save_total_limit: int = Field(1, title='Save total limit')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
lr: float = Field(3e-05, title='Learning rate')
epochs: int = Field(1, title='Number of training epochs')
batch_size: int = Field(2, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(4, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
chat_template: Optional[str] = Field(None, title='Chat template, one of: None, zephyr, chatml or tokenizer')
quantization: Optional[str] = Field('int4', title='int4, int8, or None')
target_modules: Optional[str] = Field('all-linear', title='Target modules')
merge_adapter: bool = Field(False, title='Merge adapter')
peft: bool = Field(False, title='Use PEFT')
lora_r: int = Field(16, title='Lora r')
lora_alpha: int = Field(32, title='Lora alpha')
lora_dropout: float = Field(0.05, title='Lora dropout')
model_ref: Optional[str] = Field(None, title='Reference, for DPO trainer')
dpo_beta: float = Field(0.1, title='Beta for DPO trainer')
max_prompt_length: int = Field(128, title='Prompt length')
max_completion_length: Optional[int] = Field(None, title='Completion length')
prompt_text_column: Optional[str] = Field(None, title='Prompt text column')
text_column: str = Field('text', title='Text column')
rejected_text_column: Optional[str] = Field(None, title='Rejected text column')
push_to_hub: bool = Field(False, title='Push to hub')
username: Optional[str] = Field(None, title='Hugging Face Username')
token: Optional[str] = Field(None, title='Huggingface token')
unsloth: bool = Field(False, title='Use unsloth')
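# Usage sketch (illustrative; project and dataset names are hypothetical):
# LLMTrainingParams is a pydantic model, so fields are validated on construction,
# and the config can be persisted with the save() helper inherited from
# AutoTrainParams (see common.py below).
if __name__ == '__main__':
    params = LLMTrainingParams(
        model='gpt2',
        project_name='my-llm-project',
        data_path='my-dataset',
        trainer='sft',
        peft=True,
        quantization='int4',
    )
    params.save(output_dir=params.project_name)  # writes training_params.json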
# File: autotrain-advanced-main/src/autotrain/trainers/clm/train_clm_default.py
from functools import partial
import torch
from datasets import Dataset
from peft.tuners.lora import LoraLayer
from transformers import Trainer, TrainingArguments, default_data_collator
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.clm import utils
from autotrain.trainers.clm.params import LLMTrainingParams
def process_data(data, tokenizer, config):
data = data.to_pandas()
data = data.fillna('')
data = data[[config.text_column]]
if config.add_eos_token:
data[config.text_column] = data[config.text_column] + tokenizer.eos_token
data = Dataset.from_pandas(data)
return data
def train(config):
logger.info('Starting default/generic CLM training...')
if isinstance(config, dict):
config = LLMTrainingParams(**config)
(train_data, valid_data) = utils.process_input_data(config)
tokenizer = utils.get_tokenizer(config)
(train_data, valid_data) = utils.process_data_with_chat_template(config, tokenizer, train_data, valid_data)
train_data = process_data(data=train_data, tokenizer=tokenizer, config=config)
if config.valid_split is not None:
valid_data = process_data(data=valid_data, tokenizer=tokenizer, config=config)
logging_steps = utils.configure_logging_steps(config, train_data, valid_data)
training_args = utils.configure_training_args(config, logging_steps)
config = utils.configure_block_size(config, tokenizer)
args = TrainingArguments(**training_args)
model = utils.get_model(config, tokenizer)
tokenize_fn = partial(utils.tokenize, tokenizer=tokenizer, config=config)
group_texts_fn = partial(utils.group_texts, config=config)
train_data = train_data.map(tokenize_fn, batched=True, num_proc=1, remove_columns=list(train_data.features), desc='Running tokenizer on train dataset')
if config.valid_split is not None:
valid_data = valid_data.map(tokenize_fn, batched=True, num_proc=1, remove_columns=list(valid_data.features), desc='Running tokenizer on validation dataset')
train_data = train_data.map(group_texts_fn, batched=True, num_proc=4, desc=f'Grouping texts in chunks of {config.block_size}')
if config.valid_split is not None:
valid_data = valid_data.map(group_texts_fn, batched=True, num_proc=4, desc=f'Grouping texts in chunks of {config.block_size}')
logger.info('creating trainer')
callbacks = utils.get_callbacks(config)
trainer_args = dict(args=args, model=model, callbacks=callbacks)
trainer = Trainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data if config.valid_split is not None else None, tokenizer=tokenizer, data_collator=default_data_collator)
for (name, module) in trainer.model.named_modules():
if isinstance(module, LoraLayer):
if config.mixed_precision == 'bf16':
module = module.to(torch.bfloat16)
if 'norm' in name:
module = module.to(torch.float32)
if any((x in name for x in ['lm_head', 'embed_tokens', 'wte', 'wpe'])):
if hasattr(module, 'weight'):
if config.mixed_precision == 'bf16' and module.weight.dtype == torch.float32:
module = module.to(torch.bfloat16)
trainer.remove_callback(PrinterCallback)
trainer.train()
utils.post_training_steps(config, trainer)
# File: autotrain-advanced-main/src/autotrain/trainers/clm/train_clm_dpo.py
import torch
from peft import LoraConfig
from transformers import AutoConfig, AutoModelForCausalLM, BitsAndBytesConfig, TrainingArguments
from transformers.trainer_callback import PrinterCallback
from trl import DPOTrainer
from autotrain import logger
from autotrain.trainers.clm import utils
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.common import ALLOW_REMOTE_CODE
def train(config):
logger.info('Starting DPO training...')
if isinstance(config, dict):
config = LLMTrainingParams(**config)
(train_data, valid_data) = utils.process_input_data(config)
tokenizer = utils.get_tokenizer(config)
(train_data, valid_data) = utils.process_data_with_chat_template(config, tokenizer, train_data, valid_data)
logging_steps = utils.configure_logging_steps(config, train_data, valid_data)
training_args = utils.configure_training_args(config, logging_steps)
config = utils.configure_block_size(config, tokenizer)
args = TrainingArguments(**training_args)
logger.info('loading model config...')
model_config = AutoConfig.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_cache=config.disable_gradient_checkpointing)
logger.info('loading model...')
if config.peft:
if config.quantization == 'int4':
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='nf4', bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=False)
elif config.quantization == 'int8':
bnb_config = BitsAndBytesConfig(load_in_8bit=True)
else:
bnb_config = None
model = AutoModelForCausalLM.from_pretrained(config.model, config=model_config, token=config.token, quantization_config=bnb_config, trust_remote_code=ALLOW_REMOTE_CODE, use_flash_attention_2=config.use_flash_attention_2)
logger.info('Using PEFT, model_ref will be set to None')
model_ref = None
else:
model = AutoModelForCausalLM.from_pretrained(config.model, config=model_config, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_flash_attention_2=config.use_flash_attention_2)
if config.model_ref is not None:
model_ref = AutoModelForCausalLM.from_pretrained(config.model_ref, config=model_config, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_flash_attention_2=config.use_flash_attention_2)
else:
model_ref = None
logger.info(f'model dtype: {model.dtype}')
model.resize_token_embeddings(len(tokenizer))
if model_ref is not None:
logger.info(f'model_ref dtype: {model_ref.dtype}')
model_ref.resize_token_embeddings(len(tokenizer))
if config.peft:
peft_config = LoraConfig(r=config.lora_r, lora_alpha=config.lora_alpha, lora_dropout=config.lora_dropout, bias='none', task_type='CAUSAL_LM', target_modules=utils.get_target_modules(config))
logger.info('creating trainer')
callbacks = utils.get_callbacks(config)
trainer_args = dict(args=args, model=model, callbacks=callbacks)
trainer = DPOTrainer(**trainer_args, ref_model=model_ref, beta=config.dpo_beta, train_dataset=train_data, eval_dataset=valid_data if config.valid_split is not None else None, tokenizer=tokenizer, max_length=config.block_size, max_prompt_length=config.max_prompt_length, max_target_length=config.max_completion_length, peft_config=peft_config if config.peft else None)
trainer.remove_callback(PrinterCallback)
trainer.train()
utils.post_training_steps(config, trainer)
# File: autotrain-advanced-main/src/autotrain/trainers/clm/train_clm_orpo.py
from peft import LoraConfig
from transformers.trainer_callback import PrinterCallback
from trl import ORPOConfig, ORPOTrainer
from autotrain import logger
from autotrain.trainers.clm import utils
from autotrain.trainers.clm.params import LLMTrainingParams
def train(config):
logger.info('Starting ORPO training...')
if isinstance(config, dict):
config = LLMTrainingParams(**config)
(train_data, valid_data) = utils.process_input_data(config)
tokenizer = utils.get_tokenizer(config)
(train_data, valid_data) = utils.process_data_with_chat_template(config, tokenizer, train_data, valid_data)
logging_steps = utils.configure_logging_steps(config, train_data, valid_data)
training_args = utils.configure_training_args(config, logging_steps)
config = utils.configure_block_size(config, tokenizer)
training_args['max_length'] = config.block_size
training_args['max_prompt_length'] = config.max_prompt_length
training_args['max_completion_length'] = config.max_completion_length
args = ORPOConfig(**training_args)
model = utils.get_model(config, tokenizer)
if config.peft:
peft_config = LoraConfig(r=config.lora_r, lora_alpha=config.lora_alpha, lora_dropout=config.lora_dropout, bias='none', task_type='CAUSAL_LM', target_modules=utils.get_target_modules(config))
logger.info('creating trainer')
callbacks = utils.get_callbacks(config)
trainer_args = dict(args=args, model=model, callbacks=callbacks)
trainer = ORPOTrainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data if config.valid_split is not None else None, tokenizer=tokenizer, peft_config=peft_config if config.peft else None)
trainer.remove_callback(PrinterCallback)
trainer.train()
utils.post_training_steps(config, trainer)
# File: autotrain-advanced-main/src/autotrain/trainers/clm/train_clm_reward.py
from functools import partial
import torch
from peft import LoraConfig
from transformers import AutoConfig, AutoModelForSequenceClassification, BitsAndBytesConfig
from transformers.trainer_callback import PrinterCallback
from trl import RewardConfig, RewardTrainer
from autotrain import logger
from autotrain.trainers.clm import utils
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.common import ALLOW_REMOTE_CODE
def train(config):
logger.info('Starting Reward training...')
if isinstance(config, dict):
config = LLMTrainingParams(**config)
(train_data, valid_data) = utils.process_input_data(config)
tokenizer = utils.get_tokenizer(config)
(train_data, valid_data) = utils.process_data_with_chat_template(config, tokenizer, train_data, valid_data)
logging_steps = utils.configure_logging_steps(config, train_data, valid_data)
training_args = utils.configure_training_args(config, logging_steps)
config = utils.configure_block_size(config, tokenizer)
training_args['max_length'] = config.block_size
args = RewardConfig(**training_args)
logger.info('loading model config...')
model_config = AutoConfig.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_cache=config.disable_gradient_checkpointing)
model_config.num_labels = 1
model_config.pad_token_id = tokenizer.pad_token_id
model_config.pad_token = tokenizer.pad_token
logger.info('loading model...')
if config.peft:
if config.quantization == 'int4':
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='nf4', bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=False)
elif config.quantization == 'int8':
bnb_config = BitsAndBytesConfig(load_in_8bit=True)
else:
bnb_config = None
model = AutoModelForSequenceClassification.from_pretrained(config.model, config=model_config, token=config.token, quantization_config=bnb_config, trust_remote_code=ALLOW_REMOTE_CODE, use_flash_attention_2=config.use_flash_attention_2)
else:
model = AutoModelForSequenceClassification.from_pretrained(config.model, config=model_config, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_flash_attention_2=config.use_flash_attention_2)
logger.info(f'model dtype: {model.dtype}')
model.resize_token_embeddings(len(tokenizer))
if config.peft:
peft_config = LoraConfig(r=config.lora_r, lora_alpha=config.lora_alpha, lora_dropout=config.lora_dropout, bias='none', task_type='SEQ_CLS', target_modules=utils.get_target_modules(config))
reward_proc = partial(utils.preprocess_reward, tokenizer=tokenizer)
train_data = train_data.map(reward_proc, batched=True, num_proc=4, desc='Running tokenizer on train dataset')
train_data = train_data.filter(lambda x: len(x['input_ids_chosen']) <= config.block_size and len(x['input_ids_rejected']) <= config.block_size)
if config.valid_split is not None:
valid_data = valid_data.map(reward_proc, batched=True, num_proc=4, desc='Running tokenizer on validation dataset')
valid_data = valid_data.filter(lambda x: len(x['input_ids_chosen']) <= config.block_size and len(x['input_ids_rejected']) <= config.block_size)
logger.info('creating trainer')
callbacks = utils.get_callbacks(config)
trainer_args = dict(args=args, model=model, callbacks=callbacks)
trainer = RewardTrainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data if config.valid_split is not None else None, peft_config=peft_config if config.peft else None, tokenizer=tokenizer)
trainer.remove_callback(PrinterCallback)
trainer.train()
utils.post_training_steps(config, trainer)
# File: autotrain-advanced-main/src/autotrain/trainers/clm/train_clm_sft.py
from peft import LoraConfig
from transformers.trainer_callback import PrinterCallback
from trl import SFTConfig, SFTTrainer
from autotrain import logger
from autotrain.trainers.clm import utils
from autotrain.trainers.clm.params import LLMTrainingParams
def train(config):
logger.info('Starting SFT training...')
if isinstance(config, dict):
config = LLMTrainingParams(**config)
(train_data, valid_data) = utils.process_input_data(config)
tokenizer = utils.get_tokenizer(config)
(train_data, valid_data) = utils.process_data_with_chat_template(config, tokenizer, train_data, valid_data)
logging_steps = utils.configure_logging_steps(config, train_data, valid_data)
training_args = utils.configure_training_args(config, logging_steps)
config = utils.configure_block_size(config, tokenizer)
training_args['dataset_text_field'] = config.text_column
training_args['max_seq_length'] = config.block_size
training_args['packing'] = True
args = SFTConfig(**training_args)
model = utils.get_model(config, tokenizer)
if config.peft:
peft_config = LoraConfig(r=config.lora_r, lora_alpha=config.lora_alpha, lora_dropout=config.lora_dropout, bias='none', task_type='CAUSAL_LM', target_modules=utils.get_target_modules(config))
logger.info('creating trainer')
callbacks = utils.get_callbacks(config)
trainer_args = dict(args=args, model=model, callbacks=callbacks)
trainer = SFTTrainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data if config.valid_split is not None else None, peft_config=peft_config if config.peft else None, tokenizer=tokenizer)
trainer.remove_callback(PrinterCallback)
trainer.train()
utils.post_training_steps(config, trainer)
# File: autotrain-advanced-main/src/autotrain/trainers/clm/utils.py
import ast
import gc
import os
from enum import Enum
from itertools import chain
import requests
import torch
from accelerate.state import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from autotrain import is_unsloth_available, logger
from autotrain.trainers.clm.callbacks import LoadBestPeftModelCallback, SavePeftModelCallback
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, pause_space, remove_autotrain_data, save_training_params
DEFAULT_CHAT_TEMPLATE = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
CHATML_CHAT_TEMPLATE = "{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% if loop.last and add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}{% endfor %}"
ZEPHYR_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
IGNORE_INDEX = -100
DEFAULT_PAD_TOKEN = '[PAD]'
DEFAULT_EOS_TOKEN = '</s>'
DEFAULT_BOS_TOKEN = '<s>'
DEFAULT_UNK_TOKEN = '<unk>'
TARGET_MODULES = {'Salesforce/codegen25-7b-multi': 'q_proj,k_proj,v_proj,o_proj,down_proj,up_proj,gate_proj'}
MODEL_CARD = '\n---\ntags:\n- autotrain\n- text-generation-inference\n- text-generation{peft}\nlibrary_name: transformers{base_model}\nwidget:\n - messages:\n - role: user\n content: What is your favorite condiment?\nlicense: other{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\nThis model was trained using AutoTrain. For more information, please visit [AutoTrain](https://hf.co/docs/autotrain).\n\n# Usage\n\n```python\n\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nmodel_path = "PATH_TO_THIS_REPO"\n\ntokenizer = AutoTokenizer.from_pretrained(model_path)\nmodel = AutoModelForCausalLM.from_pretrained(\n model_path,\n device_map="auto",\n torch_dtype=\'auto\'\n).eval()\n\n# Prompt content: "hi"\nmessages = [\n {{"role": "user", "content": "hi"}}\n]\n\ninput_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors=\'pt\')\noutput_ids = model.generate(input_ids.to(\'cuda\'))\nresponse = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)\n\n# Model response: "Hello! How can I assist you today?"\nprint(response)\n```\n\n'
class ZephyrSpecialTokens(str, Enum):
USER = '<|user|>'
ASSISTANT = '<|assistant|>'
SYSTEM = '<|system|>'
EOS_TOKEN = '</s>'
BOS_TOKEN = '<s>'
PAD_TOKEN = '</s>'
@classmethod
def list(cls):
return [c.value for c in cls]
class ChatmlSpecialTokens(str, Enum):
USER = '<|im_start|>user'
ASSISTANT = '<|im_start|>assistant'
SYSTEM = '<|im_start|>system'
EOS_TOKEN = '<|im_end|>'
BOS_TOKEN = '<s>'
PAD_TOKEN = '</s>'
@classmethod
def list(cls):
return [c.value for c in cls]
def preprocess_reward(examples, tokenizer):
new_examples = {'input_ids_chosen': [], 'attention_mask_chosen': [], 'input_ids_rejected': [], 'attention_mask_rejected': []}
for (chosen, rejected) in zip(examples['chosen'], examples['rejected']):
tokenized_chosen = tokenizer(chosen, truncation=True)
tokenized_rejected = tokenizer(rejected, truncation=True)
new_examples['input_ids_chosen'].append(tokenized_chosen['input_ids'])
new_examples['attention_mask_chosen'].append(tokenized_chosen['attention_mask'])
new_examples['input_ids_rejected'].append(tokenized_rejected['input_ids'])
new_examples['attention_mask_rejected'].append(tokenized_rejected['attention_mask'])
return new_examples
def get_target_modules(config):
if config.target_modules is None:
return TARGET_MODULES.get(config.model)
if config.target_modules.strip() == '':
return TARGET_MODULES.get(config.model)
if config.target_modules.strip().lower() == 'all-linear':
return 'all-linear'
return config.target_modules.split(',')
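# Examples (illustrative): target_modules='all-linear' is passed through as the
# string 'all-linear'; target_modules='q_proj,v_proj' yields ['q_proj', 'v_proj'];
# an empty or None value falls back to the per-model TARGET_MODULES table above
# (None for models not listed there).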
def group_texts(examples, config):
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
if total_length >= config.block_size:
total_length = total_length // config.block_size * config.block_size
else:
total_length = 0
result = {k: [t[i:i + config.block_size] for i in range(0, total_length, config.block_size)] for (k, t) in concatenated_examples.items()}
result['labels'] = result['input_ids'].copy()
return result
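# Worked example (illustrative; SimpleNamespace is a stand-in for the config object):
# with block_size=4, two tokenized rows of lengths 3 and 6 are concatenated
# (total length 9), truncated to 8, and re-chunked into two blocks of 4, with
# 'labels' copied from 'input_ids':
#   group_texts({'input_ids': [[1, 2, 3], [4, 5, 6, 7, 8, 9]]}, SimpleNamespace(block_size=4))
#   == {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]], 'labels': [[1, 2, 3, 4], [5, 6, 7, 8]]}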
def tokenize(examples, tokenizer, config):
output = tokenizer(examples[config.text_column])
return output
def merge_adapter(base_model_path, target_model_path, adapter_path):
logger.info('Loading adapter...')
model = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=ALLOW_REMOTE_CODE)
tokenizer = AutoTokenizer.from_pretrained(target_model_path, trust_remote_code=ALLOW_REMOTE_CODE)
try:
model.resize_token_embeddings(len(tokenizer))
model = PeftModel.from_pretrained(model, adapter_path)
except RuntimeError:
model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=8)
model = PeftModel.from_pretrained(model, adapter_path)
model = model.merge_and_unload()
logger.info('Saving target model...')
model.save_pretrained(target_model_path)
tokenizer.save_pretrained(target_model_path)
def create_model_card(config):
if config.peft:
peft = '\n- peft'
else:
peft = ''
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, peft=peft, base_model=base_model)
return model_card.strip()
def pause_endpoint(params):
endpoint_id = os.environ['ENDPOINT_ID']
username = endpoint_id.split('/')[0]
project_name = endpoint_id.split('/')[1]
api_url = f'https://api.endpoints.huggingface.cloud/v2/endpoint/{username}/{project_name}/pause'
headers = {'Authorization': f'Bearer {params.token}'}
r = requests.post(api_url, headers=headers, timeout=30)
return r.json()
def apply_chat_template(example, tokenizer, config):
if config.trainer in ('default', 'sft'):
messages = example[config.text_column]
if isinstance(messages, str):
messages = ast.literal_eval(messages)
example[config.text_column] = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
elif config.trainer == 'reward':
if all((k in example.keys() for k in ('chosen', 'rejected'))):
chosen_messages = example['chosen']
rejected_messages = example['rejected']
if isinstance(chosen_messages, str):
chosen_messages = ast.literal_eval(chosen_messages)
if isinstance(rejected_messages, str):
rejected_messages = ast.literal_eval(rejected_messages)
if config.chat_template == 'zephyr' and chosen_messages[0]['role'] != 'system':
chosen_messages.insert(0, {'role': 'system', 'content': ''})
if config.chat_template == 'zephyr' and rejected_messages[0]['role'] != 'system':
rejected_messages.insert(0, {'role': 'system', 'content': ''})
example['chosen'] = tokenizer.apply_chat_template(chosen_messages, tokenize=False)
example['rejected'] = tokenizer.apply_chat_template(rejected_messages, tokenize=False)
else:
raise ValueError(f'Could not format example as dialogue for `rm/orpo` task! Require `[chosen, rejected]` keys but found {list(example.keys())}')
elif config.trainer in ('dpo', 'orpo'):
if all((k in example.keys() for k in ('chosen', 'rejected'))):
if isinstance(example['chosen'], str):
example['chosen'] = ast.literal_eval(example['chosen'])
if isinstance(example['rejected'], str):
example['rejected'] = ast.literal_eval(example['rejected'])
prompt_messages = example['chosen'][:-1]
if config.chat_template == 'zephyr' and example['chosen'][0]['role'] != 'system':
prompt_messages.insert(0, {'role': 'system', 'content': ''})
chosen_messages = example['chosen'][-1:]
rejected_messages = example['rejected'][-1:]
example['chosen'] = tokenizer.apply_chat_template(chosen_messages, tokenize=False)
example['rejected'] = tokenizer.apply_chat_template(rejected_messages, tokenize=False)
example['prompt'] = tokenizer.apply_chat_template(prompt_messages, tokenize=False)
else:
raise ValueError(f'Could not format example as dialogue for `dpo` task! Require `[chosen, rejected]` keys but found {list(example.keys())}')
return example
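# Illustrative example for the dpo/orpo branch above: given
#   chosen   = [{'role': 'user', 'content': 'hi'}, {'role': 'assistant', 'content': 'hello'}]
#   rejected = [{'role': 'user', 'content': 'hi'}, {'role': 'assistant', 'content': 'go away'}]
# the prompt is templated from chosen[:-1] (the user turn), while 'chosen' and
# 'rejected' are templated from only the final assistant turns, so the trainer
# receives separate prompt/chosen/rejected strings.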
def post_training_steps(config, trainer):
logger.info('Finished training, saving model...')
trainer.model.config.use_cache = True
trainer.save_model(config.project_name)
model_card = create_model_card(config)
with open(f'{config.project_name}/README.md', 'w', encoding='utf-8') as f:
f.write(model_card)
if config.peft and config.merge_adapter:
del trainer
gc.collect()
torch.cuda.empty_cache()
logger.info('Merging adapter weights...')
try:
merge_adapter(base_model_path=config.model, target_model_path=config.project_name, adapter_path=config.project_name)
for file in os.listdir(config.project_name):
if file.startswith('adapter_'):
os.remove(f'{config.project_name}/{file}')
except Exception as e:
logger.warning(f'Failed to merge adapter weights: {e}')
logger.warning('Skipping adapter merge. Only adapter weights will be saved.')
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
logger.info('Pushing model to hub...')
save_training_params(config)
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
def process_input_data(config):
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.trainer in ('dpo', 'reward', 'orpo'):
if not (config.text_column == 'chosen' and config.text_column in train_data.column_names):
train_data = train_data.rename_column(config.text_column, 'chosen')
if not (config.rejected_text_column == 'rejected' and config.rejected_text_column in train_data.column_names):
train_data = train_data.rename_column(config.rejected_text_column, 'rejected')
if config.trainer in ('dpo', 'orpo'):
if not (config.prompt_text_column == 'prompt' and config.prompt_text_column in train_data.column_names):
train_data = train_data.rename_column(config.prompt_text_column, 'prompt')
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
if config.trainer in ('dpo', 'reward', 'orpo'):
if not (config.text_column == 'chosen' and config.text_column in valid_data.column_names):
valid_data = valid_data.rename_column(config.text_column, 'chosen')
if not (config.rejected_text_column == 'rejected' and config.rejected_text_column in valid_data.column_names):
valid_data = valid_data.rename_column(config.rejected_text_column, 'rejected')
if config.trainer in ('dpo', 'orpo'):
if not (config.prompt_text_column == 'prompt' and config.prompt_text_column in valid_data.column_names):
valid_data = valid_data.rename_column(config.prompt_text_column, 'prompt')
else:
valid_data = None
logger.info(f'Train data: {train_data}')
logger.info(f'Valid data: {valid_data}')
return (train_data, valid_data)
def get_tokenizer(config):
special_tokens = None
chat_template = None
if config.chat_template == 'chatml':
special_tokens = ChatmlSpecialTokens
chat_template = CHATML_CHAT_TEMPLATE
elif config.chat_template == 'zephyr':
special_tokens = ZephyrSpecialTokens
chat_template = ZEPHYR_CHAT_TEMPLATE
if special_tokens is not None:
tokenizer = AutoTokenizer.from_pretrained(config.model, pad_token=special_tokens.PAD_TOKEN.value, bos_token=special_tokens.BOS_TOKEN.value, eos_token=special_tokens.EOS_TOKEN.value, additional_special_tokens=special_tokens.list(), token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
tokenizer.chat_template = chat_template
else:
tokenizer = AutoTokenizer.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
if tokenizer.chat_template is None:
tokenizer.chat_template = DEFAULT_CHAT_TEMPLATE
if tokenizer.model_max_length > 2048:
tokenizer.model_max_length = config.model_max_length
if getattr(tokenizer, 'pad_token', None) is None:
tokenizer.pad_token = tokenizer.eos_token
if getattr(tokenizer, 'pad_token_id', None) is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
if config.padding in ('left', 'right'):
tokenizer.padding_side = config.padding
return tokenizer
def process_data_with_chat_template(config, tokenizer, train_data, valid_data):
if config.chat_template in ('chatml', 'zephyr', 'tokenizer'):
logger.info('Applying chat template')
logger.info('For ORPO/DPO, `prompt` will be extracted from chosen messages')
train_data = train_data.map(apply_chat_template, fn_kwargs={'tokenizer': tokenizer, 'config': config})
if config.valid_split is not None:
valid_data = valid_data.map(apply_chat_template, fn_kwargs={'tokenizer': tokenizer, 'config': config})
return (train_data, valid_data)
def configure_logging_steps(config, train_data, valid_data):
logger.info('configuring logging steps')
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
return logging_steps
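# Worked example (illustrative): with logging_steps=-1, no validation split,
# 1000 training rows, and batch_size=2, the heuristic gives int(0.2 * 1000 / 2) = 100,
# which is then clamped to the 25-step ceiling.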
def configure_training_args(config, logging_steps):
logger.info('configuring training args')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False, gradient_checkpointing=not config.disable_gradient_checkpointing, remove_unused_columns=False)
if not config.disable_gradient_checkpointing:
if config.peft and config.quantization in ('int4', 'int8'):
training_args['gradient_checkpointing_kwargs'] = {'use_reentrant': True}
else:
training_args['gradient_checkpointing_kwargs'] = {'use_reentrant': False}
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
return training_args
def configure_block_size(config, tokenizer):
if config.block_size == -1:
config.block_size = None
if config.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning('The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can override this default with `--block_size xxx`.')
block_size = 1024
else:
if config.block_size > tokenizer.model_max_length:
logger.warning(f'The block_size passed ({config.block_size}) is larger than the maximum length for the model ({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.')
block_size = min(config.block_size, tokenizer.model_max_length)
config.block_size = block_size
logger.info(f'Using block size {block_size}')
return config
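# Worked examples (illustrative): block_size=-1 with a tokenizer whose
# model_max_length is 2048 falls back to the 1024 default (with a warning);
# block_size=4096 against the same tokenizer is clamped to min(4096, 2048) = 2048.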
def get_callbacks(config):
is_deepspeed_enabled = os.environ.get('ACCELERATE_USE_DEEPSPEED', 'False').lower() == 'true'
callbacks = [UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()]
if config.peft and (not is_deepspeed_enabled):
callbacks.append(SavePeftModelCallback)
if config.valid_split is not None:
callbacks.append(LoadBestPeftModelCallback)
return callbacks
def get_model(config, tokenizer):
model_config = AutoConfig.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
model_type = model_config.model_type
unsloth_target_modules = None
can_use_unsloth = False
if config.unsloth and is_unsloth_available() and (config.trainer in ('default', 'sft')):
can_use_unsloth = True
if model_type in ('llama', 'mistral', 'gemma', 'qwen2') and config.unsloth:
if config.target_modules.strip().lower() == 'all-linear':
unsloth_target_modules = ['q_proj', 'k_proj', 'v_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj']
else:
unsloth_target_modules = get_target_modules(config)
else:
can_use_unsloth = False
logger.info(f'Can use unsloth: {can_use_unsloth}')
if can_use_unsloth:
from unsloth import FastLanguageModel
load_in_4bit = False
load_in_8bit = False
if config.peft and config.quantization == 'int4':
load_in_4bit = True
elif config.peft and config.quantization == 'int8':
load_in_8bit = True
dtype = None
if config.mixed_precision == 'fp16':
dtype = torch.float16
elif config.mixed_precision == 'bf16':
dtype = torch.bfloat16
(model, _) = FastLanguageModel.from_pretrained(model_name=config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, load_in_4bit=load_in_4bit, load_in_8bit=load_in_8bit, max_seq_length=config.block_size, dtype=dtype)
if config.peft:
model = FastLanguageModel.get_peft_model(model, r=config.lora_r, target_modules=unsloth_target_modules, lora_alpha=config.lora_alpha, lora_dropout=config.lora_dropout, bias='none', use_gradient_checkpointing='unsloth', random_state=config.seed, max_seq_length=config.block_size, use_rslora=False, loftq_config=None)
return model
else:
logger.warning('Unsloth not available, continuing without it...')
logger.info('loading model config...')
model_config = AutoConfig.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_cache=config.disable_gradient_checkpointing)
logger.info('loading model...')
if config.peft:
if config.quantization == 'int4':
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='nf4', bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=False)
elif config.quantization == 'int8':
bnb_config = BitsAndBytesConfig(load_in_8bit=True)
else:
bnb_config = None
model = AutoModelForCausalLM.from_pretrained(config.model, config=model_config, token=config.token, quantization_config=bnb_config, trust_remote_code=ALLOW_REMOTE_CODE, use_flash_attention_2=config.use_flash_attention_2)
else:
model = AutoModelForCausalLM.from_pretrained(config.model, config=model_config, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_flash_attention_2=config.use_flash_attention_2)
logger.info(f'model dtype: {model.dtype}')
model.resize_token_embeddings(len(tokenizer))
if config.trainer != 'default':
return model
if config.peft:
logger.info('preparing peft model...')
if config.quantization is not None:
gradient_checkpointing_kwargs = {}
if not config.disable_gradient_checkpointing:
if config.quantization in ('int4', 'int8'):
gradient_checkpointing_kwargs = {'use_reentrant': True}
else:
gradient_checkpointing_kwargs = {'use_reentrant': False}
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=not config.disable_gradient_checkpointing, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs)
else:
model.enable_input_require_grads()
peft_config = LoraConfig(r=config.lora_r, lora_alpha=config.lora_alpha, lora_dropout=config.lora_dropout, bias='none', task_type='CAUSAL_LM', target_modules=get_target_modules(config))
model = get_peft_model(model, peft_config)
return model
# File: autotrain-advanced-main/src/autotrain/trainers/common.py
""""""
import json
import os
import shutil
import time
import traceback
import requests
from accelerate import PartialState
from huggingface_hub import HfApi
from pydantic import BaseModel
from transformers import TrainerCallback, TrainerControl, TrainerState, TrainingArguments
from autotrain import is_colab, logger
ALLOW_REMOTE_CODE = os.environ.get('ALLOW_REMOTE_CODE', 'true').lower() == 'true'
def get_file_sizes(directory):
file_sizes = {}
for (root, _, files) in os.walk(directory):
for file in files:
file_path = os.path.join(root, file)
file_size = os.path.getsize(file_path)
file_size_gb = file_size / 1024 ** 3
file_sizes[file_path] = file_size_gb
return file_sizes
def remove_global_step(directory):
for (root, dirs, _) in os.walk(directory, topdown=False):
for name in dirs:
if name.startswith('global_step'):
folder_path = os.path.join(root, name)
print(f'Removing folder: {folder_path}')
shutil.rmtree(folder_path)
def remove_autotrain_data(config):
os.system(f'rm -rf {config.project_name}/autotrain-data')
remove_global_step(config.project_name)
def save_training_params(config):
if os.path.exists(f'{config.project_name}/training_params.json'):
training_params = json.load(open(f'{config.project_name}/training_params.json'))
if 'token' in training_params:
training_params.pop('token')
json.dump(training_params, open(f'{config.project_name}/training_params.json', 'w'), indent=4)
def pause_endpoint(params):
if isinstance(params, dict):
token = params['token']
else:
token = params.token
endpoint_id = os.environ['ENDPOINT_ID']
username = endpoint_id.split('/')[0]
project_name = endpoint_id.split('/')[1]
api_url = f'https://api.endpoints.huggingface.cloud/v2/endpoint/{username}/{project_name}/pause'
headers = {'Authorization': f'Bearer {token}'}
r = requests.post(api_url, headers=headers, timeout=120)
return r.json()
def pause_space(params, is_failure=False):
if 'SPACE_ID' in os.environ:
logger.info('Pausing space...')
api = HfApi(token=params.token)
if is_failure:
msg = 'Your training run has failed! Please check the logs for more details'
title = 'Your training has failed ❌'
else:
msg = 'Your training run was successful! [Check out your trained model here]'
msg += f'(https://huggingface.co/{params.username}/{params.project_name})'
title = 'Your training has finished successfully ✅'
if not params.token.startswith('hf_oauth_'):
try:
api.create_discussion(repo_id=os.environ['SPACE_ID'], title=title, description=msg, repo_type='space')
except Exception as e:
logger.warning(f'Failed to create discussion: {e}')
if is_failure:
logger.error('Model failed to train and discussion was not created.')
else:
logger.warning('Model trained successfully but discussion was not created.')
api.pause_space(repo_id=os.environ['SPACE_ID'])
if 'ENDPOINT_ID' in os.environ:
logger.info('Pausing endpoint...')
pause_endpoint(params)
def monitor(func):
def wrapper(*args, **kwargs):
config = kwargs.get('config', None)
if config is None and len(args) > 0:
config = args[0]
try:
return func(*args, **kwargs)
except Exception as e:
error_message = f'{func.__name__} has failed due to an exception: {traceback.format_exc()}'
logger.error(error_message)
logger.error(str(e))
if int(os.environ.get('PAUSE_ON_FAILURE', 1)) == 1:
pause_space(config, is_failure=True)
return wrapper
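# Usage note (descriptive): monitor is applied as a decorator around training entry
# points (e.g. the @monitor-decorated train() in the trainer __main__ modules); on an
# unhandled exception it logs the traceback and, unless PAUSE_ON_FAILURE=0 is set in
# the environment, pauses the running Space or endpoint via pause_space(..., is_failure=True).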
class AutoTrainParams(BaseModel):
class Config:
protected_namespaces = ()
def save(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
path = os.path.join(output_dir, 'training_params.json')
with open(path, 'w', encoding='utf-8') as f:
f.write(self.model_dump_json(indent=4))
def __str__(self):
data = self.model_dump()
data['token'] = '*****' if data.get('token') else None
return str(data)
def __init__(self, **data):
super().__init__(**data)
if len(self.project_name) > 0:
if not self.project_name.replace('-', '').isalnum():
raise ValueError('project_name must be alphanumeric but can contain hyphens')
if len(self.project_name) > 50:
raise ValueError('project_name cannot be more than 50 characters')
defaults = set(self.model_fields.keys())
supplied = set(data.keys())
not_supplied = defaults - supplied
if not_supplied and (not is_colab()):
logger.warning(f"Parameters not supplied by user and set to default: {', '.join(not_supplied)}")
unused = supplied - set(self.model_fields)
if unused:
logger.warning(f"Parameters supplied but not used: {', '.join(unused)}")
class UploadLogs(TrainerCallback):
def __init__(self, config):
self.config = config
self.api = None
self.last_upload_time = 0
if self.config.push_to_hub:
if PartialState().process_index == 0:
self.api = HfApi(token=config.token)
self.api.create_repo(repo_id=f'{self.config.username}/{self.config.project_name}', repo_type='model', private=True)
def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
if self.config.push_to_hub is False:
return control
if not os.path.exists(os.path.join(self.config.project_name, 'runs')):
return control
if (state.global_step + 1) % self.config.logging_steps == 0 and self.config.log == 'tensorboard':
if PartialState().process_index == 0:
current_time = time.time()
if current_time - self.last_upload_time >= 600:
try:
self.api.upload_folder(folder_path=os.path.join(self.config.project_name, 'runs'), repo_id=f'{self.config.username}/{self.config.project_name}', path_in_repo='runs')
except Exception as e:
logger.warning(f'Failed to upload logs: {e}')
logger.warning('Continuing training...')
self.last_upload_time = current_time
return control
class LossLoggingCallback(TrainerCallback):
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop('total_flos', None)
if state.is_local_process_zero:
logger.info(logs)
class TrainStartCallback(TrainerCallback):
def on_train_begin(self, args, state, control, **kwargs):
logger.info('Starting to train...')
# File: autotrain-advanced-main/src/autotrain/trainers/dreambooth/__main__.py
import argparse
import json
import os
from diffusers.utils import convert_all_state_dict_to_peft, convert_state_dict_to_kohya
from huggingface_hub import create_repo, snapshot_download, upload_folder
from safetensors.torch import load_file, save_file
from autotrain import logger
from autotrain.trainers.common import monitor, pause_space, remove_autotrain_data
from autotrain.trainers.dreambooth import utils
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = DreamBoothTrainingParams(**config)
config.prompt = str(config.prompt).strip()
if config.model in utils.XL_MODELS:
config.xl = True
try:
snapshot_download(repo_id=config.image_path, local_dir=config.project_name, token=config.token, repo_type='dataset')
config.image_path = os.path.join(config.project_name, 'concept1')
except Exception as e:
logger.warning(f'Failed to download dataset: {e}')
pass
if config.image_path == f'{config.project_name}/autotrain-data':
config.image_path = os.path.join(config.image_path, 'concept1')
if config.vae_model is not None:
if config.vae_model.strip() == '':
config.vae_model = None
if config.xl:
from autotrain.trainers.dreambooth.train_xl import main
class Args:
pretrained_model_name_or_path = config.model
pretrained_vae_model_name_or_path = config.vae_model
revision = config.revision
variant = None
dataset_name = None
dataset_config_name = None
instance_data_dir = config.image_path
cache_dir = None
image_column = 'image'
caption_column = None
repeats = 1
class_data_dir = config.class_image_path
instance_prompt = config.prompt
class_prompt = config.class_prompt
validation_prompt = None
num_validation_images = 4
validation_epochs = 50
with_prior_preservation = config.prior_preservation
num_class_images = config.num_class_images
output_dir = config.project_name
seed = config.seed
resolution = config.resolution
center_crop = config.center_crop
train_text_encoder = config.train_text_encoder
train_batch_size = config.batch_size
sample_batch_size = config.sample_batch_size
num_train_epochs = config.epochs
max_train_steps = config.num_steps
checkpointing_steps = config.checkpointing_steps
checkpoints_total_limit = None
resume_from_checkpoint = config.resume_from_checkpoint
gradient_accumulation_steps = config.gradient_accumulation
gradient_checkpointing = not config.disable_gradient_checkpointing
learning_rate = config.lr
text_encoder_lr = 5e-06
scale_lr = config.scale_lr
lr_scheduler = config.scheduler
snr_gamma = None
lr_warmup_steps = config.warmup_steps
lr_num_cycles = config.num_cycles
lr_power = config.lr_power
dataloader_num_workers = config.dataloader_num_workers
optimizer = 'AdamW'
use_8bit_adam = config.use_8bit_adam
adam_beta1 = config.adam_beta1
adam_beta2 = config.adam_beta2
prodigy_beta3 = None
prodigy_decouple = True
adam_weight_decay = config.adam_weight_decay
adam_weight_decay_text_encoder = 0.001
adam_epsilon = config.adam_epsilon
prodigy_use_bias_correction = True
prodigy_safeguard_warmup = True
max_grad_norm = config.max_grad_norm
push_to_hub = config.push_to_hub
hub_token = config.token
hub_model_id = f'{config.username}/{config.project_name}'
logging_dir = os.path.join(config.project_name, 'logs')
allow_tf32 = config.allow_tf32
report_to = 'tensorboard' if config.logging else None
mixed_precision = config.mixed_precision
prior_generation_precision = config.prior_generation_precision
local_rank = config.local_rank
enable_xformers_memory_efficient_attention = config.xformers
rank = config.rank
do_edm_style_training = False
random_flip = False
use_dora = False
_args = Args()
main(_args)
else:
from autotrain.trainers.dreambooth.train import main
class Args:
pretrained_model_name_or_path = config.model
pretrained_vae_model_name_or_path = config.vae_model
revision = config.revision
variant = None
tokenizer_name = None
instance_data_dir = config.image_path
class_data_dir = config.class_image_path
instance_prompt = config.prompt
class_prompt = config.class_prompt
validation_prompt = None
num_validation_images = 4
validation_epochs = 50
with_prior_preservation = config.prior_preservation
num_class_images = config.num_class_images
output_dir = config.project_name
seed = config.seed
resolution = config.resolution
center_crop = config.center_crop
train_text_encoder = config.train_text_encoder
train_batch_size = config.batch_size
sample_batch_size = config.sample_batch_size
max_train_steps = config.num_steps
checkpointing_steps = config.checkpointing_steps
checkpoints_total_limit = None
resume_from_checkpoint = config.resume_from_checkpoint
gradient_accumulation_steps = config.gradient_accumulation
gradient_checkpointing = not config.disable_gradient_checkpointing
learning_rate = config.lr
scale_lr = config.scale_lr
lr_scheduler = config.scheduler
lr_warmup_steps = config.warmup_steps
lr_num_cycles = config.num_cycles
lr_power = config.lr_power
dataloader_num_workers = config.dataloader_num_workers
use_8bit_adam = config.use_8bit_adam
adam_beta1 = config.adam_beta1
adam_beta2 = config.adam_beta2
adam_weight_decay = config.adam_weight_decay
adam_epsilon = config.adam_epsilon
max_grad_norm = config.max_grad_norm
push_to_hub = config.push_to_hub
hub_token = config.token
hub_model_id = f'{config.username}/{config.project_name}'
logging_dir = os.path.join(config.project_name, 'logs')
allow_tf32 = config.allow_tf32
report_to = 'tensorboard' if config.logging else None
mixed_precision = config.mixed_precision
prior_generation_precision = config.prior_generation_precision
local_rank = config.local_rank
enable_xformers_memory_efficient_attention = config.xformers
pre_compute_text_embeddings = config.pre_compute_text_embeddings
tokenizer_max_length = config.tokenizer_max_length
text_encoder_use_attention_mask = config.text_encoder_use_attention_mask
validation_images = None
class_labels_conditioning = config.class_labels_conditioning
rank = config.rank
_args = Args()
main(_args)
if os.path.exists(f'{config.project_name}/training_params.json'):
training_params = json.load(open(f'{config.project_name}/training_params.json'))
if 'token' in training_params:
training_params.pop('token')
json.dump(training_params, open(f'{config.project_name}/training_params.json', 'w'))
with open(f'{config.project_name}/prompt.txt', 'w') as f:
f.write(config.prompt)
try:
logger.info('Converting model to Kohya format...')
lora_state_dict = load_file(f'{config.project_name}/pytorch_lora_weights.safetensors')
peft_state_dict = convert_all_state_dict_to_peft(lora_state_dict)
kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict)
save_file(kohya_state_dict, f'{config.project_name}/pytorch_lora_weights_kohya.safetensors')
except Exception as e:
logger.warning(e)
logger.warning('Failed to convert model to Kohya format, skipping...')
if config.push_to_hub:
remove_autotrain_data(config)
repo_id = create_repo(repo_id=f'{config.username}/{config.project_name}', exist_ok=True, private=True, token=config.token).repo_id
if config.xl:
utils.save_model_card_xl(repo_id, base_model=config.model, train_text_encoder=config.train_text_encoder, instance_prompt=config.prompt, vae_path=config.vae_model, repo_folder=config.project_name)
else:
utils.save_model_card(repo_id, base_model=config.model, train_text_encoder=config.train_text_encoder, instance_prompt=config.prompt, repo_folder=config.project_name)
upload_folder(repo_id=repo_id, folder_path=config.project_name, commit_message='End of training', ignore_patterns=['step_*', 'epoch_*'], token=config.token)
pause_space(config)
if __name__ == '__main__':
args = parse_args()
training_config = json.load(open(args.training_config))
config = DreamBoothTrainingParams(**training_config)
train(config)
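# Usage sketch (illustrative): like the CLM entry point, this module is run with a
# JSON config of DreamBoothTrainingParams fields, e.g.
#   python -m autotrain.trainers.dreambooth --training_config config.json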
# File: autotrain-advanced-main/src/autotrain/trainers/dreambooth/datasets.py
from pathlib import Path
import torch
from PIL import Image
from PIL.ImageOps import exif_transpose
from torch.utils.data import Dataset
from torchvision import transforms
class PromptDataset(Dataset):
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example['prompt'] = self.prompt
example['index'] = index
return example
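# Usage sketch (illustrative; the prompt string is hypothetical): PromptDataset
# simply repeats a single prompt num_samples times so it can be batched with a
# torch DataLoader, e.g.
#   DataLoader(PromptDataset('a photo of a dog', num_samples=8), batch_size=4)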
class DreamBoothDatasetXL(Dataset):
def __init__(self, instance_data_root, class_data_root=None, class_num=None, size=1024, center_crop=False):
self.size = size
self.center_crop = center_crop
self.instance_data_root = Path(instance_data_root)
if not self.instance_data_root.exists():
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(instance_data_root).iterdir())
self.num_instance_images = len(self.instance_images_path)
self._length = self.num_instance_images
if class_data_root is not None:
self.class_data_root = Path(class_data_root)
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_root.iterdir())
if class_num is not None:
self.num_class_images = min(len(self.class_images_path), class_num)
else:
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
else:
self.class_data_root = None
self.image_transforms = transforms.Compose([transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
instance_image = exif_transpose(instance_image)
if not instance_image.mode == 'RGB':
instance_image = instance_image.convert('RGB')
example['instance_images'] = self.image_transforms(instance_image)
if self.class_data_root:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
class_image = exif_transpose(class_image)
if not class_image.mode == 'RGB':
class_image = class_image.convert('RGB')
example['class_images'] = self.image_transforms(class_image)
return example
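# --- Illustrative usage sketch (not part of the original source) ---
# DreamBoothDatasetXL only yields normalized image tensors; for SDXL the
# prompts are handled separately by the trainer. The directory below is a
# placeholder and must contain the instance images.
def _example_xl_dataset():
    dataset = DreamBoothDatasetXL(instance_data_root='data/instance_images', size=1024)
    sample = dataset[0]
    # sample['instance_images'] is a (3, 1024, 1024) tensor normalized to [-1, 1]
    return sample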
class DreamBoothDataset(Dataset):
def __init__(self, config, tokenizers, encoder_hidden_states, instance_prompt_encoder_hidden_states):
self.config = config
self.tokenizer = tokenizers[0]
self.size = self.config.resolution
self.center_crop = self.config.center_crop
self.tokenizer_max_length = self.config.tokenizer_max_length
self.instance_data_root = Path(self.config.image_path)
self.instance_prompt = self.config.prompt
self.class_data_root = Path(self.config.class_image_path) if self.config.prior_preservation else None
self.class_prompt = self.config.class_prompt
self.class_num = self.config.num_class_images
self.encoder_hidden_states = encoder_hidden_states
self.instance_prompt_encoder_hidden_states = instance_prompt_encoder_hidden_states
if not self.instance_data_root.exists():
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(self.instance_data_root).iterdir())
self.num_instance_images = len(self.instance_images_path)
self._length = self.num_instance_images
if self.class_data_root is not None:
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_root.iterdir())
if self.class_num is not None:
self.num_class_images = min(len(self.class_images_path), self.class_num)
else:
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
else:
self.class_data_root = None
self.image_transforms = transforms.Compose([transforms.Resize(self.size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(self.size) if self.center_crop else transforms.RandomCrop(self.size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
def __len__(self):
return self._length
def _tokenize_prompt(self, tokenizer, prompt, tokenizer_max_length=None):
if tokenizer_max_length is not None:
max_length = tokenizer_max_length
else:
max_length = tokenizer.model_max_length
text_inputs = tokenizer(prompt, truncation=True, padding='max_length', max_length=max_length, return_tensors='pt')
return text_inputs
def __getitem__(self, index):
example = {}
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
instance_image = exif_transpose(instance_image)
if not instance_image.mode == 'RGB':
instance_image = instance_image.convert('RGB')
example['instance_images'] = self.image_transforms(instance_image)
if not self.config.xl:
if self.encoder_hidden_states is not None:
example['instance_prompt_ids'] = self.encoder_hidden_states
else:
text_inputs = self._tokenize_prompt(self.tokenizer, self.instance_prompt, tokenizer_max_length=self.tokenizer_max_length)
example['instance_prompt_ids'] = text_inputs.input_ids
example['instance_attention_mask'] = text_inputs.attention_mask
if self.class_data_root:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
class_image = exif_transpose(class_image)
if not class_image.mode == 'RGB':
class_image = class_image.convert('RGB')
example['class_images'] = self.image_transforms(class_image)
if not self.config.xl:
if self.instance_prompt_encoder_hidden_states is not None:
example['class_prompt_ids'] = self.instance_prompt_encoder_hidden_states
else:
class_text_inputs = self._tokenize_prompt(self.tokenizer, self.class_prompt, tokenizer_max_length=self.tokenizer_max_length)
example['class_prompt_ids'] = class_text_inputs.input_ids
example['class_attention_mask'] = class_text_inputs.attention_mask
return example
def collate_fn(examples, config):
pixel_values = [example['instance_images'] for example in examples]
if not config.xl:
has_attention_mask = 'instance_attention_mask' in examples[0]
input_ids = [example['instance_prompt_ids'] for example in examples]
if has_attention_mask:
attention_mask = [example['instance_attention_mask'] for example in examples]
if config.prior_preservation:
pixel_values += [example['class_images'] for example in examples]
if not config.xl:
input_ids += [example['class_prompt_ids'] for example in examples]
if has_attention_mask:
attention_mask += [example['class_attention_mask'] for example in examples]
pixel_values = torch.stack(pixel_values)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
batch = {'pixel_values': pixel_values}
if not config.xl:
input_ids = torch.cat(input_ids, dim=0)
batch['input_ids'] = input_ids
if has_attention_mask:
batch['attention_mask'] = torch.cat(attention_mask, dim=0)
return batch
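# --- Illustrative usage sketch (not part of the original source) ---
# Wiring DreamBoothDataset and collate_fn together the way the trainer does,
# for the non-XL path. The model id and image folder are placeholders.
def _example_dataloader():
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer
    from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams

    config = DreamBoothTrainingParams(
        model='runwayml/stable-diffusion-v1-5',  # placeholder base model
        image_path='data/instance_images',  # placeholder folder of instance images
        prompt='photo of sks dog',
        resolution=512,
    )
    tokenizer = AutoTokenizer.from_pretrained(config.model, subfolder='tokenizer', use_fast=False)
    dataset = DreamBoothDataset(config, tokenizers=[tokenizer], encoder_hidden_states=None, instance_prompt_encoder_hidden_states=None)
    loader = DataLoader(dataset, batch_size=config.batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, config))
    batch = next(iter(loader))
    # batch['pixel_values']: (B, 3, 512, 512); batch['input_ids']: (B, 77) for CLIP tokenizers
    return batch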
# File: autotrain-advanced-main/src/autotrain/trainers/dreambooth/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class DreamBoothTrainingParams(AutoTrainParams):
model: str = Field(None, title='Model name')
vae_model: Optional[str] = Field(None, title='VAE model name')
revision: Optional[str] = Field(None, title='Revision')
tokenizer: Optional[str] = Field(None, title='Tokenizer, if different from model')
image_path: str = Field(None, title='Image path')
class_image_path: Optional[str] = Field(None, title='Class image path')
prompt: str = Field(None, title='Instance prompt')
class_prompt: Optional[str] = Field(None, title='Class prompt')
num_class_images: int = Field(100, title='Number of class images')
class_labels_conditioning: Optional[str] = Field(None, title='Class labels conditioning')
prior_preservation: bool = Field(False, title='With prior preservation')
prior_loss_weight: float = Field(1.0, title='Prior loss weight')
project_name: str = Field('dreambooth-model', title='Output directory')
seed: int = Field(42, title='Seed')
resolution: int = Field(512, title='Resolution')
center_crop: bool = Field(False, title='Center crop')
train_text_encoder: bool = Field(False, title='Train text encoder')
batch_size: int = Field(4, title='Train batch size')
sample_batch_size: int = Field(4, title='Sample batch size')
epochs: int = Field(1, title='Number of training epochs')
num_steps: int = Field(None, title='Max train steps')
checkpointing_steps: int = Field(500, title='Checkpointing steps')
resume_from_checkpoint: Optional[str] = Field(None, title='Resume from checkpoint')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
disable_gradient_checkpointing: bool = Field(False, title='Disable gradient checkpointing')
lr: float = Field(0.0001, title='Learning rate')
scale_lr: bool = Field(False, title='Scale learning rate')
scheduler: str = Field('constant', title='Learning rate scheduler')
warmup_steps: int = Field(0, title='Learning rate warmup steps')
num_cycles: int = Field(1, title='Learning rate num cycles')
lr_power: float = Field(1.0, title='Learning rate power')
dataloader_num_workers: int = Field(0, title='Dataloader num workers')
use_8bit_adam: bool = Field(False, title='Use 8bit adam')
adam_beta1: float = Field(0.9, title='Adam beta 1')
adam_beta2: float = Field(0.999, title='Adam beta 2')
adam_weight_decay: float = Field(0.01, title='Adam weight decay')
adam_epsilon: float = Field(1e-08, title='Adam epsilon')
max_grad_norm: float = Field(1.0, title='Max grad norm')
allow_tf32: bool = Field(False, title='Allow TF32')
prior_generation_precision: Optional[str] = Field(None, title='Prior generation precision')
local_rank: int = Field(-1, title='Local rank')
xformers: bool = Field(False, title='Enable xformers memory efficient attention')
pre_compute_text_embeddings: bool = Field(False, title='Pre compute text embeddings')
tokenizer_max_length: Optional[int] = Field(None, title='Tokenizer max length')
text_encoder_use_attention_mask: bool = Field(False, title='Text encoder use attention mask')
rank: int = Field(4, title='Rank')
xl: bool = Field(False, title='XL')
mixed_precision: Optional[str] = Field(None, title='Mixed precision')
token: Optional[str] = Field(None, title='Hub token')
push_to_hub: bool = Field(False, title='Push to hub')
username: Optional[str] = Field(None, title='Hub username')
validation_prompt: Optional[str] = Field(None, title='Validation prompt')
num_validation_images: int = Field(4, title='Number of validation images')
validation_epochs: int = Field(50, title='Validation epochs')
checkpoints_total_limit: Optional[int] = Field(None, title='Checkpoints total limit')
validation_images: Optional[str] = Field(None, title='Validation images')
logging: bool = Field(False, title='Logging using tensorboard')
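# --- Illustrative usage sketch (not part of the original source) ---
# DreamBoothTrainingParams is a pydantic model (via AutoTrainParams): required
# fields are passed by keyword and everything else falls back to the defaults
# declared above. The model id and image folder are placeholders.
def _example_params():
    params = DreamBoothTrainingParams(
        model='stabilityai/stable-diffusion-xl-base-1.0',  # placeholder base model
        image_path='data/instance_images',  # placeholder folder of instance images
        prompt='photo of sks dog',
        xl=True,
    )
    assert params.resolution == 512 and params.batch_size == 4  # defaults declared above
    return params.model_dump()  # plain dict, e.g. for writing training_params.json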
# File: autotrain-advanced-main/src/autotrain/trainers/dreambooth/train.py
import copy
import gc
import logging
import math
import os
import shutil
from pathlib import Path
import diffusers
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel
from diffusers.loaders import LoraLoaderMixin
from diffusers.optimization import get_scheduler
from diffusers.training_utils import _set_state_dict_into_text_encoder, cast_training_params
from diffusers.utils import convert_state_dict_to_diffusers, convert_unet_state_dict_to_peft, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.torch_utils import is_compiled_module
from huggingface_hub.utils import insecure_hashlib
from packaging import version
from peft import LoraConfig
from peft.utils import get_peft_model_state_dict, set_peft_model_state_dict
from PIL import Image
from PIL.ImageOps import exif_transpose
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
from autotrain import logger
def log_validation(pipeline, args, accelerator, pipeline_args, epoch, is_final_validation=False):
logger.info(f'Running validation... \n Generating {args.num_validation_images} images with prompt: {args.validation_prompt}.')
scheduler_args = {}
if 'variance_type' in pipeline.scheduler.config:
variance_type = pipeline.scheduler.config.variance_type
if variance_type in ['learned', 'learned_range']:
variance_type = 'fixed_small'
scheduler_args['variance_type'] = variance_type
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
if args.validation_images is None:
images = []
for _ in range(args.num_validation_images):
with torch.cuda.amp.autocast():
image = pipeline(**pipeline_args, generator=generator).images[0]
images.append(image)
else:
images = []
for image in args.validation_images:
image = Image.open(image)
with torch.cuda.amp.autocast():
image = pipeline(**pipeline_args, image=image, generator=generator).images[0]
images.append(image)
for tracker in accelerator.trackers:
phase_name = 'test' if is_final_validation else 'validation'
if tracker.name == 'tensorboard':
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images(phase_name, np_images, epoch, dataformats='NHWC')
del pipeline
torch.cuda.empty_cache()
return images
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(pretrained_model_name_or_path, subfolder='text_encoder', revision=revision)
model_class = text_encoder_config.architectures[0]
if model_class == 'CLIPTextModel':
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == 'RobertaSeriesModelWithTransformation':
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
return RobertaSeriesModelWithTransformation
elif model_class == 'T5EncoderModel':
from transformers import T5EncoderModel
return T5EncoderModel
else:
raise ValueError(f'{model_class} is not supported.')
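# --- Illustrative usage sketch (not part of the original source) ---
# The helper above maps a checkpoint's text_encoder config to the matching
# transformers class (e.g. CLIPTextModel for Stable Diffusion v1.x). The model
# id is a placeholder.
def _example_text_encoder_class():
    cls = import_model_class_from_model_name_or_path('runwayml/stable-diffusion-v1-5', revision=None)
    return cls.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder='text_encoder')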
class DreamBoothDataset(Dataset):
def __init__(self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, class_num=None, size=512, center_crop=False, encoder_hidden_states=None, class_prompt_encoder_hidden_states=None, tokenizer_max_length=None):
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.encoder_hidden_states = encoder_hidden_states
self.class_prompt_encoder_hidden_states = class_prompt_encoder_hidden_states
self.tokenizer_max_length = tokenizer_max_length
self.instance_data_root = Path(instance_data_root)
if not self.instance_data_root.exists():
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(instance_data_root).iterdir())
self.num_instance_images = len(self.instance_images_path)
self.instance_prompt = instance_prompt
self._length = self.num_instance_images
if class_data_root is not None:
self.class_data_root = Path(class_data_root)
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_root.iterdir())
if class_num is not None:
self.num_class_images = min(len(self.class_images_path), class_num)
else:
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
self.class_prompt = class_prompt
else:
self.class_data_root = None
self.image_transforms = transforms.Compose([transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
instance_image = exif_transpose(instance_image)
if not instance_image.mode == 'RGB':
instance_image = instance_image.convert('RGB')
example['instance_images'] = self.image_transforms(instance_image)
if self.encoder_hidden_states is not None:
example['instance_prompt_ids'] = self.encoder_hidden_states
else:
text_inputs = tokenize_prompt(self.tokenizer, self.instance_prompt, tokenizer_max_length=self.tokenizer_max_length)
example['instance_prompt_ids'] = text_inputs.input_ids
example['instance_attention_mask'] = text_inputs.attention_mask
if self.class_data_root:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
class_image = exif_transpose(class_image)
if not class_image.mode == 'RGB':
class_image = class_image.convert('RGB')
example['class_images'] = self.image_transforms(class_image)
if self.class_prompt_encoder_hidden_states is not None:
example['class_prompt_ids'] = self.class_prompt_encoder_hidden_states
else:
class_text_inputs = tokenize_prompt(self.tokenizer, self.class_prompt, tokenizer_max_length=self.tokenizer_max_length)
example['class_prompt_ids'] = class_text_inputs.input_ids
example['class_attention_mask'] = class_text_inputs.attention_mask
return example
def collate_fn(examples, with_prior_preservation=False):
has_attention_mask = 'instance_attention_mask' in examples[0]
input_ids = [example['instance_prompt_ids'] for example in examples]
pixel_values = [example['instance_images'] for example in examples]
if has_attention_mask:
attention_mask = [example['instance_attention_mask'] for example in examples]
if with_prior_preservation:
input_ids += [example['class_prompt_ids'] for example in examples]
pixel_values += [example['class_images'] for example in examples]
if has_attention_mask:
attention_mask += [example['class_attention_mask'] for example in examples]
pixel_values = torch.stack(pixel_values)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
input_ids = torch.cat(input_ids, dim=0)
batch = {'input_ids': input_ids, 'pixel_values': pixel_values}
if has_attention_mask:
batch['attention_mask'] = torch.cat(attention_mask, dim=0)
return batch
class PromptDataset(Dataset):
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example['prompt'] = self.prompt
example['index'] = index
return example
def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
if tokenizer_max_length is not None:
max_length = tokenizer_max_length
else:
max_length = tokenizer.model_max_length
text_inputs = tokenizer(prompt, truncation=True, padding='max_length', max_length=max_length, return_tensors='pt')
return text_inputs
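# --- Illustrative usage sketch (not part of the original source) ---
# tokenize_prompt pads/truncates to the tokenizer's max length (or an explicit
# tokenizer_max_length) and returns the full BatchEncoding. The model id is a
# placeholder.
def _example_tokenize_prompt():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder='tokenizer', use_fast=False)
    text_inputs = tokenize_prompt(tokenizer, 'photo of sks dog')
    # text_inputs.input_ids has shape (1, tokenizer.model_max_length), i.e. (1, 77) for CLIP
    return text_inputs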
def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_attention_mask=None):
text_input_ids = input_ids.to(text_encoder.device)
if text_encoder_use_attention_mask:
attention_mask = attention_mask.to(text_encoder.device)
else:
attention_mask = None
prompt_embeds = text_encoder(text_input_ids, attention_mask=attention_mask, return_dict=False)
prompt_embeds = prompt_embeds[0]
return prompt_embeds
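# --- Illustrative usage sketch (not part of the original source) ---
# encode_prompt runs tokenized ids through the text encoder and keeps only the
# last hidden state, which is what the UNet receives as encoder_hidden_states
# in the training loop below. text_encoder and tokenizer are supplied by the
# caller here.
def _example_encode_prompt(text_encoder, tokenizer):
    text_inputs = tokenize_prompt(tokenizer, 'photo of sks dog')
    prompt_embeds = encode_prompt(text_encoder, text_inputs.input_ids, text_inputs.attention_mask, text_encoder_use_attention_mask=False)
    # prompt_embeds: (1, sequence_length, hidden_size), e.g. (1, 77, 768) for SD v1.x CLIP
    return prompt_embeds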
def main(args):
if args.report_to == 'wandb' and args.hub_token is not None:
raise ValueError('You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token. Please use `huggingface-cli login` to authenticate with the Hub.')
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config)
if args.report_to == 'wandb':
if not is_wandb_available():
raise ImportError('Make sure to install wandb if you want to use it for logging during training.')
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and (accelerator.num_processes > 1):
raise ValueError('Gradient accumulation is not supported when training the text encoder in distributed training. Please set gradient_accumulation_steps to 1. This feature will be supported in the future.')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
if args.seed is not None:
set_seed(args.seed)
if args.with_prior_preservation:
class_images_dir = Path(args.class_data_dir)
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True)
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < args.num_class_images:
torch_dtype = torch.float16 if accelerator.device.type == 'cuda' else torch.float32
if args.prior_generation_precision == 'fp32':
torch_dtype = torch.float32
elif args.prior_generation_precision == 'fp16':
torch_dtype = torch.float16
elif args.prior_generation_precision == 'bf16':
torch_dtype = torch.bfloat16
pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, variant=args.variant)
pipeline.set_progress_bar_config(disable=True)
num_new_images = args.num_class_images - cur_class_images
logger.info(f'Number of class images to sample: {num_new_images}.')
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
sample_dataloader = accelerator.prepare(sample_dataloader)
pipeline.to(accelerator.device)
for example in tqdm(sample_dataloader, desc='Generating class images', disable=not accelerator.is_local_main_process):
images = pipeline(example['prompt']).images
for (i, image) in enumerate(images):
hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest()
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
image.save(image_filename)
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
elif args.pretrained_model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer', revision=args.revision, use_fast=False)
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder='scheduler')
text_encoder = text_encoder_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder', revision=args.revision, variant=args.variant)
vae_path = args.pretrained_model_name_or_path if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path
try:
vae = AutoencoderKL.from_pretrained(vae_path, subfolder='vae' if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant)
except OSError:
vae = None
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet', revision=args.revision, variant=args.variant)
if vae is not None:
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
unet.requires_grad_(False)
weight_dtype = torch.float32
if accelerator.mixed_precision == 'fp16':
weight_dtype = torch.float16
elif accelerator.mixed_precision == 'bf16':
weight_dtype = torch.bfloat16
unet.to(accelerator.device, dtype=weight_dtype)
if vae is not None:
vae.to(accelerator.device, dtype=weight_dtype)
text_encoder.to(accelerator.device, dtype=weight_dtype)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse('0.0.16'):
logger.warning('xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details.')
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError('xformers is not available. Make sure it is installed correctly')
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
if args.train_text_encoder:
text_encoder.gradient_checkpointing_enable()
unet_lora_config = LoraConfig(r=args.rank, lora_alpha=args.rank, init_lora_weights='gaussian', target_modules=['to_k', 'to_q', 'to_v', 'to_out.0', 'add_k_proj', 'add_v_proj'])
unet.add_adapter(unet_lora_config)
if args.train_text_encoder:
text_lora_config = LoraConfig(r=args.rank, lora_alpha=args.rank, init_lora_weights='gaussian', target_modules=['q_proj', 'k_proj', 'v_proj', 'out_proj'])
text_encoder.add_adapter(text_lora_config)
def unwrap_model(model):
model = accelerator.unwrap_model(model)
model = model._orig_mod if is_compiled_module(model) else model
return model
def save_model_hook(models, weights, output_dir):
if accelerator.is_main_process:
unet_lora_layers_to_save = None
text_encoder_lora_layers_to_save = None
for model in models:
if isinstance(model, type(unwrap_model(unet))):
unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
elif isinstance(model, type(unwrap_model(text_encoder))):
text_encoder_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
else:
raise ValueError(f'unexpected save model: {model.__class__}')
weights.pop()
LoraLoaderMixin.save_lora_weights(output_dir, unet_lora_layers=unet_lora_layers_to_save, text_encoder_lora_layers=text_encoder_lora_layers_to_save)
def load_model_hook(models, input_dir):
unet_ = None
text_encoder_ = None
while len(models) > 0:
model = models.pop()
if isinstance(model, type(unwrap_model(unet))):
unet_ = model
elif isinstance(model, type(unwrap_model(text_encoder))):
text_encoder_ = model
else:
raise ValueError(f'unexpected save model: {model.__class__}')
(lora_state_dict, network_alphas) = LoraLoaderMixin.lora_state_dict(input_dir)
unet_state_dict = {f"{k.replace('unet.', '')}": v for (k, v) in lora_state_dict.items() if k.startswith('unet.')}
unet_state_dict = convert_unet_state_dict_to_peft(unet_state_dict)
incompatible_keys = set_peft_model_state_dict(unet_, unet_state_dict, adapter_name='default')
if incompatible_keys is not None:
unexpected_keys = getattr(incompatible_keys, 'unexpected_keys', None)
if unexpected_keys:
logger.warning(f'Loading adapter weights from state_dict led to unexpected keys not found in the model: {unexpected_keys}. ')
if args.train_text_encoder:
_set_state_dict_into_text_encoder(lora_state_dict, prefix='text_encoder.', text_encoder=text_encoder_)
if args.mixed_precision == 'fp16':
models = [unet_]
if args.train_text_encoder:
models.append(text_encoder_)
cast_training_params(models, dtype=torch.float32)
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
if args.mixed_precision == 'fp16':
models = [unet]
if args.train_text_encoder:
models.append(text_encoder)
cast_training_params(models, dtype=torch.float32)
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError('To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.')
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
params_to_optimize = list(filter(lambda p: p.requires_grad, unet.parameters()))
if args.train_text_encoder:
params_to_optimize = params_to_optimize + list(filter(lambda p: p.requires_grad, text_encoder.parameters()))
optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon)
if args.pre_compute_text_embeddings:
def compute_text_embeddings(prompt):
with torch.no_grad():
text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=args.tokenizer_max_length)
prompt_embeds = encode_prompt(text_encoder, text_inputs.input_ids, text_inputs.attention_mask, text_encoder_use_attention_mask=args.text_encoder_use_attention_mask)
return prompt_embeds
pre_computed_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
validation_prompt_negative_prompt_embeds = compute_text_embeddings('')
if args.validation_prompt is not None:
validation_prompt_encoder_hidden_states = compute_text_embeddings(args.validation_prompt)
else:
validation_prompt_encoder_hidden_states = None
if args.class_prompt is not None:
pre_computed_class_prompt_encoder_hidden_states = compute_text_embeddings(args.class_prompt)
else:
pre_computed_class_prompt_encoder_hidden_states = None
text_encoder = None
tokenizer = None
gc.collect()
torch.cuda.empty_cache()
else:
pre_computed_encoder_hidden_states = None
validation_prompt_encoder_hidden_states = None
validation_prompt_negative_prompt_embeds = None
pre_computed_class_prompt_encoder_hidden_states = None
train_dataset = DreamBoothDataset(instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, class_num=args.num_class_images, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, encoder_hidden_states=pre_computed_encoder_hidden_states, class_prompt_encoder_hidden_states=pre_computed_class_prompt_encoder_hidden_states, tokenizer_max_length=args.tokenizer_max_length)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.dataloader_num_workers)
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power)
if args.train_text_encoder:
(unet, text_encoder, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(unet, text_encoder, optimizer, train_dataloader, lr_scheduler)
else:
(unet, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if accelerator.is_main_process:
tracker_config = vars(copy.deepcopy(args))
accelerator.init_trackers('dreambooth-lora', config=tracker_config)
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num batches each epoch = {len(train_dataloader)}')
logger.info(f' Num Epochs = {args.num_train_epochs}')
logger.info(f' Instantaneous batch size per device = {args.train_batch_size}')
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
logger.info(f' Total optimization steps = {args.max_train_steps}')
global_step = 0
first_epoch = 0
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != 'latest':
path = os.path.basename(args.resume_from_checkpoint)
else:
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith('checkpoint')]
dirs = sorted(dirs, key=lambda x: int(x.split('-')[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.")
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f'Resuming from checkpoint {path}')
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split('-')[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
progress_bar = tqdm(range(0, args.max_train_steps), initial=initial_global_step, desc='Steps', disable=not accelerator.is_local_main_process)
for epoch in range(first_epoch, args.num_train_epochs):
unet.train()
if args.train_text_encoder:
text_encoder.train()
for (step, batch) in enumerate(train_dataloader):
with accelerator.accumulate(unet):
pixel_values = batch['pixel_values'].to(dtype=weight_dtype)
if vae is not None:
model_input = vae.encode(pixel_values).latent_dist.sample()
model_input = model_input * vae.config.scaling_factor
else:
model_input = pixel_values
noise = torch.randn_like(model_input)
(bsz, channels, height, width) = model_input.shape
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device)
timesteps = timesteps.long()
noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
if args.pre_compute_text_embeddings:
encoder_hidden_states = batch['input_ids']
else:
encoder_hidden_states = encode_prompt(text_encoder, batch['input_ids'], batch['attention_mask'], text_encoder_use_attention_mask=args.text_encoder_use_attention_mask)
if unwrap_model(unet).config.in_channels == channels * 2:
noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1)
if args.class_labels_conditioning == 'timesteps':
class_labels = timesteps
else:
class_labels = None
model_pred = unet(noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels, return_dict=False)[0]
# some UNet variants predict noise and variance together (6 output channels); keep only the noise prediction
if model_pred.shape[1] == 6:
(model_pred, _) = torch.chunk(model_pred, 2, dim=1)
if noise_scheduler.config.prediction_type == 'epsilon':
target = noise
elif noise_scheduler.config.prediction_type == 'v_prediction':
target = noise_scheduler.get_velocity(model_input, noise, timesteps)
else:
raise ValueError(f'Unknown prediction type {noise_scheduler.config.prediction_type}')
if args.with_prior_preservation:
(model_pred, model_pred_prior) = torch.chunk(model_pred, 2, dim=0)
(target, target_prior) = torch.chunk(target, 2, dim=0)
loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction='mean')
loss = loss + args.prior_loss_weight * prior_loss
else:
loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(params_to_optimize, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith('checkpoint')]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split('-')[1]))
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(f'{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints')
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f'checkpoint-{global_step}')
accelerator.save_state(save_path)
logger.info(f'Saved state to {save_path}')
logs = {'loss': loss.detach().item(), 'lr': lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
if accelerator.is_main_process:
if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, unet=unwrap_model(unet), text_encoder=None if args.pre_compute_text_embeddings else unwrap_model(text_encoder), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype)
if args.pre_compute_text_embeddings:
pipeline_args = {'prompt_embeds': validation_prompt_encoder_hidden_states, 'negative_prompt_embeds': validation_prompt_negative_prompt_embeds}
else:
pipeline_args = {'prompt': args.validation_prompt}
images = log_validation(pipeline, args, accelerator, pipeline_args, epoch)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
unet = unwrap_model(unet)
unet = unet.to(torch.float32)
unet_lora_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
if args.train_text_encoder:
text_encoder = unwrap_model(text_encoder)
text_encoder_state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(text_encoder))
else:
text_encoder_state_dict = None
LoraLoaderMixin.save_lora_weights(save_directory=args.output_dir, unet_lora_layers=unet_lora_state_dict, text_encoder_lora_layers=text_encoder_state_dict)
accelerator.end_training()
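# --- Illustrative sketch (not part of the original source) ---
# The prior-preservation branch in main() stacks instance and class samples in
# one batch, splits the UNet prediction in half, and adds a weighted class
# ("prior") loss. This standalone helper mirrors that arithmetic on plain
# tensors.
def _example_prior_preservation_loss(model_pred, target, prior_loss_weight=1.0):
    import torch
    import torch.nn.functional as F
    model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
    target, target_prior = torch.chunk(target, 2, dim=0)
    instance_loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
    prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction='mean')
    return instance_loss + prior_loss_weight * prior_loss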
# File: autotrain-advanced-main/src/autotrain/trainers/dreambooth/train_xl.py
import contextlib
import gc
import itertools
import json
import logging
import math
import os
import random
import shutil
from pathlib import Path
import diffusers
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
from diffusers import AutoencoderKL, DDPMScheduler, DPMSolverMultistepScheduler, EDMEulerScheduler, EulerDiscreteScheduler, StableDiffusionXLPipeline, UNet2DConditionModel
from diffusers.loaders import LoraLoaderMixin
from diffusers.optimization import get_scheduler
from diffusers.training_utils import _set_state_dict_into_text_encoder, cast_training_params, compute_snr
from diffusers.utils import convert_state_dict_to_diffusers, convert_unet_state_dict_to_peft, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.torch_utils import is_compiled_module
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import insecure_hashlib
from packaging import version
from peft import LoraConfig, set_peft_model_state_dict
from peft.utils import get_peft_model_state_dict
from PIL import Image
from PIL.ImageOps import exif_transpose
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.transforms.functional import crop
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
from autotrain import logger
def determine_scheduler_type(pretrained_model_name_or_path, revision):
model_index_filename = 'model_index.json'
if os.path.isdir(pretrained_model_name_or_path):
model_index = os.path.join(pretrained_model_name_or_path, model_index_filename)
else:
model_index = hf_hub_download(repo_id=pretrained_model_name_or_path, filename=model_index_filename, revision=revision)
with open(model_index, 'r') as f:
scheduler_type = json.load(f)['scheduler'][1]
return scheduler_type
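# --- Illustrative usage sketch (not part of the original source) ---
# determine_scheduler_type reads the scheduler class name out of the
# checkpoint's model_index.json; main() below switches to EDM-style training
# when that name contains 'EDM'. The model id is a placeholder.
def _example_scheduler_type():
    scheduler_cls_name = determine_scheduler_type('stabilityai/stable-diffusion-xl-base-1.0', revision=None)
    return scheduler_cls_name  # e.g. 'EulerDiscreteScheduler' for the SDXL base checkpoint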
def log_validation(pipeline, args, accelerator, pipeline_args, epoch, is_final_validation=False):
logger.info(f'Running validation... \n Generating {args.num_validation_images} images with prompt: {args.validation_prompt}.')
scheduler_args = {}
if not args.do_edm_style_training:
if 'variance_type' in pipeline.scheduler.config:
variance_type = pipeline.scheduler.config.variance_type
if variance_type in ['learned', 'learned_range']:
variance_type = 'fixed_small'
scheduler_args['variance_type'] = variance_type
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
inference_ctx = contextlib.nullcontext() if 'playground' in args.pretrained_model_name_or_path else torch.cuda.amp.autocast()
with inference_ctx:
images = [pipeline(**pipeline_args, generator=generator).images[0] for _ in range(args.num_validation_images)]
for tracker in accelerator.trackers:
phase_name = 'test' if is_final_validation else 'validation'
if tracker.name == 'tensorboard':
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images(phase_name, np_images, epoch, dataformats='NHWC')
del pipeline
torch.cuda.empty_cache()
return images
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str, subfolder: str='text_encoder'):
text_encoder_config = PretrainedConfig.from_pretrained(pretrained_model_name_or_path, subfolder=subfolder, revision=revision)
model_class = text_encoder_config.architectures[0]
if model_class == 'CLIPTextModel':
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == 'CLIPTextModelWithProjection':
from transformers import CLIPTextModelWithProjection
return CLIPTextModelWithProjection
else:
raise ValueError(f'{model_class} is not supported.')
class DreamBoothDataset(Dataset):
def __init__(self, instance_data_root, instance_prompt, class_prompt, class_data_root=None, class_num=None, size=1024, repeats=1, center_crop=False, random_flip=False):
self.size = size
self.resolution = size
self.center_crop = center_crop
self.instance_prompt = instance_prompt
self.custom_instance_prompts = None
self.class_prompt = class_prompt
self.random_flip = random_flip
self.instance_data_root = Path(instance_data_root)
if not self.instance_data_root.exists():
raise ValueError("Instance images root doesn't exists.")
instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())]
self.custom_instance_prompts = None
self.instance_images = []
for img in instance_images:
self.instance_images.extend(itertools.repeat(img, repeats))
self.original_sizes = []
self.crop_top_lefts = []
self.pixel_values = []
train_resize = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR)
train_crop = transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size)
train_flip = transforms.RandomHorizontalFlip(p=1.0)
train_transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
for image in self.instance_images:
image = exif_transpose(image)
if not image.mode == 'RGB':
image = image.convert('RGB')
self.original_sizes.append((image.height, image.width))
image = train_resize(image)
if self.random_flip and random.random() < 0.5:
image = train_flip(image)
if self.center_crop:
y1 = max(0, int(round((image.height - self.resolution) / 2.0)))
x1 = max(0, int(round((image.width - self.resolution) / 2.0)))
image = train_crop(image)
else:
(y1, x1, h, w) = train_crop.get_params(image, (self.resolution, self.resolution))
image = crop(image, y1, x1, h, w)
crop_top_left = (y1, x1)
self.crop_top_lefts.append(crop_top_left)
image = train_transforms(image)
self.pixel_values.append(image)
self.num_instance_images = len(self.instance_images)
self._length = self.num_instance_images
if class_data_root is not None:
self.class_data_root = Path(class_data_root)
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_root.iterdir())
if class_num is not None:
self.num_class_images = min(len(self.class_images_path), class_num)
else:
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
else:
self.class_data_root = None
self.image_transforms = transforms.Compose([transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image = self.pixel_values[index % self.num_instance_images]
original_size = self.original_sizes[index % self.num_instance_images]
crop_top_left = self.crop_top_lefts[index % self.num_instance_images]
example['instance_images'] = instance_image
example['original_size'] = original_size
example['crop_top_left'] = crop_top_left
if self.custom_instance_prompts:
caption = self.custom_instance_prompts[index % self.num_instance_images]
if caption:
example['instance_prompt'] = caption
else:
example['instance_prompt'] = self.instance_prompt
else:
example['instance_prompt'] = self.instance_prompt
if self.class_data_root:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
class_image = exif_transpose(class_image)
if not class_image.mode == 'RGB':
class_image = class_image.convert('RGB')
example['class_images'] = self.image_transforms(class_image)
example['class_prompt'] = self.class_prompt
return example
def collate_fn(examples, with_prior_preservation=False):
pixel_values = [example['instance_images'] for example in examples]
prompts = [example['instance_prompt'] for example in examples]
original_sizes = [example['original_size'] for example in examples]
crop_top_lefts = [example['crop_top_left'] for example in examples]
if with_prior_preservation:
pixel_values += [example['class_images'] for example in examples]
prompts += [example['class_prompt'] for example in examples]
original_sizes += [example['original_size'] for example in examples]
crop_top_lefts += [example['crop_top_left'] for example in examples]
pixel_values = torch.stack(pixel_values)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
batch = {'pixel_values': pixel_values, 'prompts': prompts, 'original_sizes': original_sizes, 'crop_top_lefts': crop_top_lefts}
return batch
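# --- Illustrative usage sketch (not part of the original source) ---
# The SDXL dataset keeps prompts as raw strings plus per-image original_size /
# crop_top_left metadata, and collate_fn passes them through so the training
# loop can build SDXL's additional time ids. The image folder is a placeholder.
def _example_xl_dataloader():
    from torch.utils.data import DataLoader
    dataset = DreamBoothDataset(instance_data_root='data/instance_images', instance_prompt='photo of sks dog', class_prompt=None, size=1024)
    loader = DataLoader(dataset, batch_size=2, shuffle=True, collate_fn=lambda examples: collate_fn(examples, with_prior_preservation=False))
    batch = next(iter(loader))
    # batch['pixel_values']: (2, 3, 1024, 1024); batch['prompts']: list of strings;
    # batch['original_sizes'] / batch['crop_top_lefts']: lists of (height, width) / (top, left) tuples
    return batch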
class PromptDataset(Dataset):
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example['prompt'] = self.prompt
example['index'] = index
return example
def tokenize_prompt(tokenizer, prompt):
text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt')
text_input_ids = text_inputs.input_ids
return text_input_ids
def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None):
prompt_embeds_list = []
for (i, text_encoder) in enumerate(text_encoders):
if tokenizers is not None:
tokenizer = tokenizers[i]
text_input_ids = tokenize_prompt(tokenizer, prompt)
else:
assert text_input_ids_list is not None
text_input_ids = text_input_ids_list[i]
prompt_embeds = text_encoder(text_input_ids.to(text_encoder.device), output_hidden_states=True, return_dict=False)
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds[-1][-2]
(bs_embed, seq_len, _) = prompt_embeds.shape
prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
return (prompt_embeds, pooled_prompt_embeds)
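# --- Illustrative usage sketch (not part of the original source) ---
# SDXL uses two text encoders: encode_prompt concatenates their penultimate
# hidden states along the feature dimension and returns the pooled embedding of
# the last encoder alongside them. text_encoders and tokenizers are supplied by
# the caller here.
def _example_xl_encode_prompt(text_encoders, tokenizers):
    prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, 'photo of sks dog')
    # for the SDXL base checkpoint: prompt_embeds (1, 77, 2048), pooled_prompt_embeds (1, 1280)
    return prompt_embeds, pooled_prompt_embeds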
def main(args):
if args.report_to == 'wandb' and args.hub_token is not None:
raise ValueError('You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token. Please use `huggingface-cli login` to authenticate with the Hub.')
if args.do_edm_style_training and args.snr_gamma is not None:
raise ValueError('Min-SNR formulation is not supported when conducting EDM-style training.')
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, kwargs_handlers=[kwargs])
if args.report_to == 'wandb':
if not is_wandb_available():
raise ImportError('Make sure to install wandb if you want to use it for logging during training.')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
if args.seed is not None:
set_seed(args.seed)
if args.with_prior_preservation:
class_images_dir = Path(args.class_data_dir)
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True)
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < args.num_class_images:
torch_dtype = torch.float16 if accelerator.device.type == 'cuda' else torch.float32
if args.prior_generation_precision == 'fp32':
torch_dtype = torch.float32
elif args.prior_generation_precision == 'fp16':
torch_dtype = torch.float16
elif args.prior_generation_precision == 'bf16':
torch_dtype = torch.bfloat16
pipeline = StableDiffusionXLPipeline.from_pretrained(args.pretrained_model_name_or_path, torch_dtype=torch_dtype, revision=args.revision, variant=args.variant)
pipeline.set_progress_bar_config(disable=True)
num_new_images = args.num_class_images - cur_class_images
logger.info(f'Number of class images to sample: {num_new_images}.')
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
sample_dataloader = accelerator.prepare(sample_dataloader)
pipeline.to(accelerator.device)
for example in tqdm(sample_dataloader, desc='Generating class images', disable=not accelerator.is_local_main_process):
images = pipeline(example['prompt']).images
for (i, image) in enumerate(images):
hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest()
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
image.save(image_filename)
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
tokenizer_one = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer', revision=args.revision, use_fast=False)
tokenizer_two = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer_2', revision=args.revision, use_fast=False)
text_encoder_cls_one = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
text_encoder_cls_two = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision, subfolder='text_encoder_2')
scheduler_type = determine_scheduler_type(args.pretrained_model_name_or_path, args.revision)
if 'EDM' in scheduler_type:
args.do_edm_style_training = True
noise_scheduler = EDMEulerScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder='scheduler')
logger.info('Performing EDM-style training!')
elif args.do_edm_style_training:
noise_scheduler = EulerDiscreteScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder='scheduler')
logger.info('Performing EDM-style training!')
else:
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder='scheduler')
text_encoder_one = text_encoder_cls_one.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder', revision=args.revision, variant=args.variant)
text_encoder_two = text_encoder_cls_two.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder_2', revision=args.revision, variant=args.variant)
vae_path = args.pretrained_model_name_or_path if args.pretrained_vae_model_name_or_path is None else args.pretrained_vae_model_name_or_path
vae = AutoencoderKL.from_pretrained(vae_path, subfolder='vae' if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision, variant=args.variant)
latents_mean = latents_std = None
if hasattr(vae.config, 'latents_mean') and vae.config.latents_mean is not None:
latents_mean = torch.tensor(vae.config.latents_mean).view(1, 4, 1, 1)
if hasattr(vae.config, 'latents_std') and vae.config.latents_std is not None:
latents_std = torch.tensor(vae.config.latents_std).view(1, 4, 1, 1)
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet', revision=args.revision, variant=args.variant)
vae.requires_grad_(False)
text_encoder_one.requires_grad_(False)
text_encoder_two.requires_grad_(False)
unet.requires_grad_(False)
weight_dtype = torch.float32
if accelerator.mixed_precision == 'fp16':
weight_dtype = torch.float16
elif accelerator.mixed_precision == 'bf16':
weight_dtype = torch.bfloat16
unet.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=torch.float32)
text_encoder_one.to(accelerator.device, dtype=weight_dtype)
text_encoder_two.to(accelerator.device, dtype=weight_dtype)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse('0.0.16'):
logger.warning('xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details.')
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError('xformers is not available. Make sure it is installed correctly')
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
if args.train_text_encoder:
text_encoder_one.gradient_checkpointing_enable()
text_encoder_two.gradient_checkpointing_enable()
unet_lora_config = LoraConfig(r=args.rank, use_dora=args.use_dora, lora_alpha=args.rank, init_lora_weights='gaussian', target_modules=['to_k', 'to_q', 'to_v', 'to_out.0'])
unet.add_adapter(unet_lora_config)
if args.train_text_encoder:
text_lora_config = LoraConfig(r=args.rank, use_dora=args.use_dora, lora_alpha=args.rank, init_lora_weights='gaussian', target_modules=['q_proj', 'k_proj', 'v_proj', 'out_proj'])
text_encoder_one.add_adapter(text_lora_config)
text_encoder_two.add_adapter(text_lora_config)
def unwrap_model(model):
model = accelerator.unwrap_model(model)
model = model._orig_mod if is_compiled_module(model) else model
return model
def save_model_hook(models, weights, output_dir):
if accelerator.is_main_process:
unet_lora_layers_to_save = None
text_encoder_one_lora_layers_to_save = None
text_encoder_two_lora_layers_to_save = None
for model in models:
if isinstance(model, type(unwrap_model(unet))):
unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
elif isinstance(model, type(unwrap_model(text_encoder_one))):
text_encoder_one_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
elif isinstance(model, type(unwrap_model(text_encoder_two))):
text_encoder_two_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
else:
raise ValueError(f'unexpected save model: {model.__class__}')
weights.pop()
StableDiffusionXLPipeline.save_lora_weights(output_dir, unet_lora_layers=unet_lora_layers_to_save, text_encoder_lora_layers=text_encoder_one_lora_layers_to_save, text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save)
def load_model_hook(models, input_dir):
unet_ = None
text_encoder_one_ = None
text_encoder_two_ = None
while len(models) > 0:
model = models.pop()
if isinstance(model, type(unwrap_model(unet))):
unet_ = model
elif isinstance(model, type(unwrap_model(text_encoder_one))):
text_encoder_one_ = model
elif isinstance(model, type(unwrap_model(text_encoder_two))):
text_encoder_two_ = model
else:
raise ValueError(f'unexpected save model: {model.__class__}')
(lora_state_dict, network_alphas) = LoraLoaderMixin.lora_state_dict(input_dir)
unet_state_dict = {f"{k.replace('unet.', '')}": v for (k, v) in lora_state_dict.items() if k.startswith('unet.')}
unet_state_dict = convert_unet_state_dict_to_peft(unet_state_dict)
incompatible_keys = set_peft_model_state_dict(unet_, unet_state_dict, adapter_name='default')
if incompatible_keys is not None:
unexpected_keys = getattr(incompatible_keys, 'unexpected_keys', None)
if unexpected_keys:
logger.warning(f'Loading adapter weights from state_dict led to unexpected keys not found in the model: {unexpected_keys}. ')
if args.train_text_encoder:
_set_state_dict_into_text_encoder(lora_state_dict, prefix='text_encoder.', text_encoder=text_encoder_one_)
_set_state_dict_into_text_encoder(lora_state_dict, prefix='text_encoder_2.', text_encoder=text_encoder_two_)
if args.mixed_precision == 'fp16':
models = [unet_]
if args.train_text_encoder:
models.extend([text_encoder_one_, text_encoder_two_])
cast_training_params(models)
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
if args.mixed_precision == 'fp16':
models = [unet]
if args.train_text_encoder:
models.extend([text_encoder_one, text_encoder_two])
cast_training_params(models, dtype=torch.float32)
unet_lora_parameters = list(filter(lambda p: p.requires_grad, unet.parameters()))
if args.train_text_encoder:
text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters()))
text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters()))
unet_lora_parameters_with_lr = {'params': unet_lora_parameters, 'lr': args.learning_rate}
if args.train_text_encoder:
text_lora_parameters_one_with_lr = {'params': text_lora_parameters_one, 'weight_decay': args.adam_weight_decay_text_encoder, 'lr': args.text_encoder_lr if args.text_encoder_lr else args.learning_rate}
text_lora_parameters_two_with_lr = {'params': text_lora_parameters_two, 'weight_decay': args.adam_weight_decay_text_encoder, 'lr': args.text_encoder_lr if args.text_encoder_lr else args.learning_rate}
params_to_optimize = [unet_lora_parameters_with_lr, text_lora_parameters_one_with_lr, text_lora_parameters_two_with_lr]
else:
params_to_optimize = [unet_lora_parameters_with_lr]
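# Optimizer selection: AdamW (optionally the 8-bit bitsandbytes variant) or Prodigy.
# Prodigy adapts its own step size, which is why a learning rate around 1.0 is recommended below.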
if not (args.optimizer.lower() == 'prodigy' or args.optimizer.lower() == 'adamw'):
logger.warning(f'Unsupported choice of optimizer: {args.optimizer}. Supported optimizers include [adamW, prodigy]. Defaulting to adamW.')
args.optimizer = 'adamw'
if args.use_8bit_adam and (not args.optimizer.lower() == 'adamw'):
logger.warning(f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was set to {args.optimizer.lower()}")
if args.optimizer.lower() == 'adamw':
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError('To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.')
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
optimizer = optimizer_class(params_to_optimize, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon)
if args.optimizer.lower() == 'prodigy':
try:
import prodigyopt
except ImportError:
raise ImportError('To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`')
optimizer_class = prodigyopt.Prodigy
if args.learning_rate <= 0.1:
logger.warning("Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0")
if args.train_text_encoder and args.text_encoder_lr:
logger.warning(f'Learning rates were provided for both the unet and the text encoder, e.g. text_encoder_lr: {args.text_encoder_lr} and learning_rate: {args.learning_rate}. When using Prodigy, only learning_rate is used as the initial learning rate.')
params_to_optimize[1]['lr'] = args.learning_rate
params_to_optimize[2]['lr'] = args.learning_rate
optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), beta3=args.prodigy_beta3, weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, decouple=args.prodigy_decouple, use_bias_correction=args.prodigy_use_bias_correction, safeguard_warmup=args.prodigy_safeguard_warmup)
train_dataset = DreamBoothDataset(instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_prompt=args.class_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_num=args.num_class_images, size=args.resolution, repeats=args.repeats, center_crop=args.center_crop, random_flip=args.random_flip)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.dataloader_num_workers)
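# SDXL micro-conditioning: besides the text embeddings, the UNet receives 'time ids' built from
# each image's original size, its crop top-left coordinates and the target resolution.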
def compute_time_ids(original_size, crops_coords_top_left):
target_size = (args.resolution, args.resolution)
add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_time_ids = torch.tensor([add_time_ids])
add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
return add_time_ids
if not args.train_text_encoder:
tokenizers = [tokenizer_one, tokenizer_two]
text_encoders = [text_encoder_one, text_encoder_two]
def compute_text_embeddings(prompt, text_encoders, tokenizers):
with torch.no_grad():
(prompt_embeds, pooled_prompt_embeds) = encode_prompt(text_encoders, tokenizers, prompt)
prompt_embeds = prompt_embeds.to(accelerator.device)
pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device)
return (prompt_embeds, pooled_prompt_embeds)
if not args.train_text_encoder and (not train_dataset.custom_instance_prompts):
(instance_prompt_hidden_states, instance_pooled_prompt_embeds) = compute_text_embeddings(args.instance_prompt, text_encoders, tokenizers)
if args.with_prior_preservation:
if not args.train_text_encoder:
(class_prompt_hidden_states, class_pooled_prompt_embeds) = compute_text_embeddings(args.class_prompt, text_encoders, tokenizers)
if not args.train_text_encoder and (not train_dataset.custom_instance_prompts):
del tokenizers, text_encoders
gc.collect()
torch.cuda.empty_cache()
if not train_dataset.custom_instance_prompts:
if not args.train_text_encoder:
prompt_embeds = instance_prompt_hidden_states
unet_add_text_embeds = instance_pooled_prompt_embeds
if args.with_prior_preservation:
prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0)
unet_add_text_embeds = torch.cat([unet_add_text_embeds, class_pooled_prompt_embeds], dim=0)
else:
tokens_one = tokenize_prompt(tokenizer_one, args.instance_prompt)
tokens_two = tokenize_prompt(tokenizer_two, args.instance_prompt)
if args.with_prior_preservation:
class_tokens_one = tokenize_prompt(tokenizer_one, args.class_prompt)
class_tokens_two = tokenize_prompt(tokenizer_two, args.class_prompt)
tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0)
tokens_two = torch.cat([tokens_two, class_tokens_two], dim=0)
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power)
if args.train_text_encoder:
(unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler)
else:
(unet, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if accelerator.is_main_process:
tracker_name = 'dreambooth-lora-sd-xl' if 'playground' not in args.pretrained_model_name_or_path else 'dreambooth-lora-playground'
accelerator.init_trackers(tracker_name, config=vars(args))
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num batches each epoch = {len(train_dataloader)}')
logger.info(f' Num Epochs = {args.num_train_epochs}')
logger.info(f' Instantaneous batch size per device = {args.train_batch_size}')
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
logger.info(f' Total optimization steps = {args.max_train_steps}')
global_step = 0
first_epoch = 0
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != 'latest':
path = os.path.basename(args.resume_from_checkpoint)
else:
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith('checkpoint')]
dirs = sorted(dirs, key=lambda x: int(x.split('-')[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.")
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f'Resuming from checkpoint {path}')
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split('-')[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
progress_bar = tqdm(range(0, args.max_train_steps), initial=initial_global_step, desc='Steps', disable=not accelerator.is_local_main_process)
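# For EDM-style training: map each sampled discrete timestep to its continuous sigma from the
# noise scheduler and broadcast it to the dimensionality of the latent tensors.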
def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
sigmas = noise_scheduler.sigmas.to(device=accelerator.device, dtype=dtype)
schedule_timesteps = noise_scheduler.timesteps.to(accelerator.device)
timesteps = timesteps.to(accelerator.device)
step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
sigma = sigmas[step_indices].flatten()
while len(sigma.shape) < n_dim:
sigma = sigma.unsqueeze(-1)
return sigma
for epoch in range(first_epoch, args.num_train_epochs):
unet.train()
if args.train_text_encoder:
text_encoder_one.train()
text_encoder_two.train()
accelerator.unwrap_model(text_encoder_one).text_model.embeddings.requires_grad_(True)
accelerator.unwrap_model(text_encoder_two).text_model.embeddings.requires_grad_(True)
for (step, batch) in enumerate(train_dataloader):
with accelerator.accumulate(unet):
pixel_values = batch['pixel_values'].to(dtype=vae.dtype)
prompts = batch['prompts']
if train_dataset.custom_instance_prompts:
if not args.train_text_encoder:
(prompt_embeds, unet_add_text_embeds) = compute_text_embeddings(prompts, text_encoders, tokenizers)
else:
tokens_one = tokenize_prompt(tokenizer_one, prompts)
tokens_two = tokenize_prompt(tokenizer_two, prompts)
model_input = vae.encode(pixel_values).latent_dist.sample()
if latents_mean is None and latents_std is None:
model_input = model_input * vae.config.scaling_factor
if args.pretrained_vae_model_name_or_path is None:
model_input = model_input.to(weight_dtype)
else:
latents_mean = latents_mean.to(device=model_input.device, dtype=model_input.dtype)
latents_std = latents_std.to(device=model_input.device, dtype=model_input.dtype)
model_input = (model_input - latents_mean) * vae.config.scaling_factor / latents_std
model_input = model_input.to(dtype=weight_dtype)
noise = torch.randn_like(model_input)
bsz = model_input.shape[0]
if not args.do_edm_style_training:
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device)
timesteps = timesteps.long()
else:
indices = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,))
timesteps = noise_scheduler.timesteps[indices].to(device=model_input.device)
noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
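# EDM-style training preconditions the noisy latents before the UNet forward pass, either via the
# scheduler's precondition_inputs() or the analytic 1 / sqrt(sigma^2 + 1) scaling.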
if args.do_edm_style_training:
sigmas = get_sigmas(timesteps, len(noisy_model_input.shape), noisy_model_input.dtype)
if 'EDM' in scheduler_type:
inp_noisy_latents = noise_scheduler.precondition_inputs(noisy_model_input, sigmas)
else:
inp_noisy_latents = noisy_model_input / (sigmas ** 2 + 1) ** 0.5
add_time_ids = torch.cat([compute_time_ids(original_size=s, crops_coords_top_left=c) for (s, c) in zip(batch['original_sizes'], batch['crop_top_lefts'])])
if not train_dataset.custom_instance_prompts:
elems_to_repeat_text_embeds = bsz // 2 if args.with_prior_preservation else bsz
else:
elems_to_repeat_text_embeds = 1
if not args.train_text_encoder:
unet_added_conditions = {'time_ids': add_time_ids, 'text_embeds': unet_add_text_embeds.repeat(elems_to_repeat_text_embeds, 1)}
prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
model_pred = unet(inp_noisy_latents if args.do_edm_style_training else noisy_model_input, timesteps, prompt_embeds_input, added_cond_kwargs=unet_added_conditions, return_dict=False)[0]
else:
unet_added_conditions = {'time_ids': add_time_ids}
(prompt_embeds, pooled_prompt_embeds) = encode_prompt(text_encoders=[text_encoder_one, text_encoder_two], tokenizers=None, prompt=None, text_input_ids_list=[tokens_one, tokens_two])
unet_added_conditions.update({'text_embeds': pooled_prompt_embeds.repeat(elems_to_repeat_text_embeds, 1)})
prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
model_pred = unet(inp_noisy_latents if args.do_edm_style_training else noisy_model_input, timesteps, prompt_embeds_input, added_cond_kwargs=unet_added_conditions, return_dict=False)[0]
weighting = None
if args.do_edm_style_training:
if 'EDM' in scheduler_type:
model_pred = noise_scheduler.precondition_outputs(noisy_model_input, model_pred, sigmas)
elif noise_scheduler.config.prediction_type == 'epsilon':
model_pred = model_pred * -sigmas + noisy_model_input
elif noise_scheduler.config.prediction_type == 'v_prediction':
model_pred = model_pred * (-sigmas / (sigmas ** 2 + 1) ** 0.5) + noisy_model_input / (sigmas ** 2 + 1)
if 'EDM' not in scheduler_type:
weighting = (sigmas ** (-2.0)).float()
if noise_scheduler.config.prediction_type == 'epsilon':
target = model_input if args.do_edm_style_training else noise
elif noise_scheduler.config.prediction_type == 'v_prediction':
target = model_input if args.do_edm_style_training else noise_scheduler.get_velocity(model_input, noise, timesteps)
else:
raise ValueError(f'Unknown prediction type {noise_scheduler.config.prediction_type}')
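# With prior preservation the batch is the concatenation of instance and class images: split the
# predictions and targets in half and compute the class (prior) loss separately.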
if args.with_prior_preservation:
(model_pred, model_pred_prior) = torch.chunk(model_pred, 2, dim=0)
(target, target_prior) = torch.chunk(target, 2, dim=0)
if weighting is not None:
prior_loss = torch.mean((weighting.float() * (model_pred_prior.float() - target_prior.float()) ** 2).reshape(target_prior.shape[0], -1), 1)
prior_loss = prior_loss.mean()
else:
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction='mean')
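# Loss weighting: plain (optionally sigma-weighted) MSE by default, or min-SNR weighting when
# snr_gamma is set, which caps the per-timestep SNR at snr_gamma to down-weight low-noise timesteps.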
if args.snr_gamma is None:
if weighting is not None:
loss = torch.mean((weighting.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1), 1)
loss = loss.mean()
else:
loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
else:
snr = compute_snr(noise_scheduler, timesteps)
base_weight = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr
if noise_scheduler.config.prediction_type == 'v_prediction':
mse_loss_weights = base_weight + 1
else:
mse_loss_weights = base_weight
loss = F.mse_loss(model_pred.float(), target.float(), reduction='none')
loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
loss = loss.mean()
if args.with_prior_preservation:
loss = loss + args.prior_loss_weight * prior_loss
accelerator.backward(loss)
if accelerator.sync_gradients:
params_to_clip = itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two) if args.train_text_encoder else unet_lora_parameters
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if accelerator.is_main_process:
if global_step % args.checkpointing_steps == 0:
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith('checkpoint')]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split('-')[1]))
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(f'{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints')
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(args.output_dir, f'checkpoint-{global_step}')
accelerator.save_state(save_path)
logger.info(f'Saved state to {save_path}')
logs = {'loss': loss.detach().item(), 'lr': lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
if accelerator.is_main_process:
if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
if not args.train_text_encoder:
text_encoder_one = text_encoder_cls_one.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder', revision=args.revision, variant=args.variant)
text_encoder_two = text_encoder_cls_two.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder_2', revision=args.revision, variant=args.variant)
pipeline = StableDiffusionXLPipeline.from_pretrained(args.pretrained_model_name_or_path, vae=vae, text_encoder=accelerator.unwrap_model(text_encoder_one), text_encoder_2=accelerator.unwrap_model(text_encoder_two), unet=accelerator.unwrap_model(unet), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype)
pipeline_args = {'prompt': args.validation_prompt}
images = log_validation(pipeline, args, accelerator, pipeline_args, epoch)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
unet = unwrap_model(unet)
unet = unet.to(torch.float32)
unet_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
if args.train_text_encoder:
text_encoder_one = unwrap_model(text_encoder_one)
text_encoder_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(text_encoder_one.to(torch.float32)))
text_encoder_two = unwrap_model(text_encoder_two)
text_encoder_2_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(text_encoder_two.to(torch.float32)))
else:
text_encoder_lora_layers = None
text_encoder_2_lora_layers = None
StableDiffusionXLPipeline.save_lora_weights(save_directory=args.output_dir, unet_lora_layers=unet_lora_layers, text_encoder_lora_layers=text_encoder_lora_layers, text_encoder_2_lora_layers=text_encoder_2_lora_layers)
accelerator.end_training()
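# Inference sketch (not part of this script; the model id and sampler settings below are placeholders):
# the LoRA weights written by save_lora_weights() above can be loaded back into an SDXL pipeline, e.g.
#   from diffusers import StableDiffusionXLPipeline
#   pipe = StableDiffusionXLPipeline.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', torch_dtype=torch.float16).to('cuda')
#   pipe.load_lora_weights(args.output_dir)
#   image = pipe(args.validation_prompt, num_inference_steps=25).images[0]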
# File: autotrain-advanced-main/src/autotrain/trainers/dreambooth/trainer.py
import itertools
import math
import os
import shutil
import torch
import torch.nn.functional as F
from diffusers import StableDiffusionXLPipeline
from diffusers.loaders import LoraLoaderMixin, text_encoder_lora_state_dict
from diffusers.optimization import get_scheduler
from huggingface_hub import create_repo, upload_folder
from tqdm import tqdm
from autotrain import logger
from autotrain.trainers.dreambooth import utils
class Trainer:
def __init__(self, unet, vae, train_dataloader, train_dataset, text_encoders, config, optimizer, accelerator, noise_scheduler, weight_dtype, text_lora_parameters, unet_lora_parameters, tokenizers):
self.train_dataloader = train_dataloader
self.config = config
self.optimizer = optimizer
self.accelerator = accelerator
self.unet = unet
self.vae = vae
self.noise_scheduler = noise_scheduler
self.train_dataset = train_dataset
self.weight_dtype = weight_dtype
self.text_lora_parameters = text_lora_parameters
self.unet_lora_parameters = unet_lora_parameters
self.tokenizers = tokenizers
self.text_encoders = text_encoders
if self.config.xl:
self._setup_xl()
self.text_encoder1 = text_encoders[0]
self.text_encoder2 = None
if len(text_encoders) == 2:
self.text_encoder2 = text_encoders[1]
overrode_max_train_steps = False
self.num_update_steps_per_epoch = math.ceil(len(train_dataloader) / config.gradient_accumulation)
if self.config.num_steps is None:
self.config.num_steps = self.config.epochs * self.num_update_steps_per_epoch
overrode_max_train_steps = True
self.scheduler = get_scheduler(self.config.scheduler, optimizer=self.optimizer, num_warmup_steps=self.config.warmup_steps * self.accelerator.num_processes, num_training_steps=self.config.num_steps * self.accelerator.num_processes, num_cycles=self.config.num_cycles, power=self.config.lr_power)
if self.config.train_text_encoder:
if len(text_encoders) == 1:
(self.unet, self.text_encoder1, self.optimizer, self.train_dataloader, self.scheduler) = self.accelerator.prepare(self.unet, self.text_encoder1, self.optimizer, self.train_dataloader, self.scheduler)
elif len(text_encoders) == 2:
(self.unet, self.text_encoder1, self.text_encoder2, self.optimizer, self.train_dataloader, self.scheduler) = self.accelerator.prepare(self.unet, self.text_encoder1, self.text_encoder2, self.optimizer, self.train_dataloader, self.scheduler)
else:
(self.unet, self.optimizer, self.train_dataloader, self.scheduler) = self.accelerator.prepare(self.unet, self.optimizer, self.train_dataloader, self.scheduler)
self.num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.config.gradient_accumulation)
if overrode_max_train_steps:
self.config.num_steps = self.config.epochs * self.num_update_steps_per_epoch
self.config.epochs = math.ceil(self.config.num_steps / self.num_update_steps_per_epoch)
if self.accelerator.is_main_process:
self.accelerator.init_trackers('dreambooth')
self.total_batch_size = self.config.batch_size * self.accelerator.num_processes * self.config.gradient_accumulation
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(self.train_dataset)}')
logger.info(f' Num batches each epoch = {len(self.train_dataloader)}')
logger.info(f' Num Epochs = {self.config.epochs}')
logger.info(f' Instantaneous batch size per device = {config.batch_size}')
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {self.total_batch_size}')
logger.info(f' Gradient Accumulation steps = {self.config.gradient_accumulation}')
logger.info(f' Total optimization steps = {self.config.num_steps}')
logger.info(f' Training config = {self.config}')
self.global_step = 0
self.first_epoch = 0
if config.resume_from_checkpoint:
self._resume_from_checkpoint()
def compute_text_embeddings(self, prompt):
logger.info(f'Computing text embeddings for prompt: {prompt}')
with torch.no_grad():
(prompt_embeds, pooled_prompt_embeds) = utils.encode_prompt_xl(self.text_encoders, self.tokenizers, prompt)
prompt_embeds = prompt_embeds.to(self.accelerator.device)
pooled_prompt_embeds = pooled_prompt_embeds.to(self.accelerator.device)
return (prompt_embeds, pooled_prompt_embeds)
def compute_time_ids(self):
original_size = (self.config.resolution, self.config.resolution)
target_size = (self.config.resolution, self.config.resolution)
crops_coords_top_left = (0, 0)
add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_time_ids = torch.tensor([add_time_ids])
add_time_ids = add_time_ids.to(self.accelerator.device, dtype=self.weight_dtype)
return add_time_ids
def _setup_xl(self):
instance_time_ids = self.compute_time_ids()
if not self.config.train_text_encoder:
(instance_prompt_hidden_states, instance_pooled_prompt_embeds) = self.compute_text_embeddings(self.config.prompt)
if self.config.prior_preservation:
class_time_ids = self.compute_time_ids()
if not self.config.train_text_encoder:
(class_prompt_hidden_states, class_pooled_prompt_embeds) = self.compute_text_embeddings(self.config.class_prompt)
self.add_time_ids = instance_time_ids
if self.config.prior_preservation:
self.add_time_ids = torch.cat([self.add_time_ids, class_time_ids], dim=0)
if not self.config.train_text_encoder:
self.prompt_embeds = instance_prompt_hidden_states
self.unet_add_text_embeds = instance_pooled_prompt_embeds
if self.config.prior_preservation:
self.prompt_embeds = torch.cat([self.prompt_embeds, class_prompt_hidden_states], dim=0)
self.unet_add_text_embeds = torch.cat([self.unet_add_text_embeds, class_pooled_prompt_embeds], dim=0)
else:
self.tokens_one = utils.tokenize_prompt(self.tokenizers[0], self.config.prompt).input_ids
self.tokens_two = utils.tokenize_prompt(self.tokenizers[1], self.config.prompt).input_ids
if self.config.prior_preservation:
class_tokens_one = utils.tokenize_prompt(self.tokenizers[0], self.config.class_prompt).input_ids
class_tokens_two = utils.tokenize_prompt(self.tokenizers[1], self.config.class_prompt).input_ids
self.tokens_one = torch.cat([self.tokens_one, class_tokens_one], dim=0)
self.tokens_two = torch.cat([self.tokens_two, class_tokens_two], dim=0)
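# Resuming: pick the explicit checkpoint or the newest checkpoint-* directory, restore the accelerator
# state, and derive first_epoch/resume_step so the training loop can skip batches already consumed.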
def _resume_from_checkpoint(self):
if self.config.resume_from_checkpoint != 'latest':
path = os.path.basename(self.config.resume_from_checkpoint)
else:
dirs = os.listdir(self.config.project_name)
dirs = [d for d in dirs if d.startswith('checkpoint')]
dirs = sorted(dirs, key=lambda x: int(x.split('-')[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
self.accelerator.print(f"Checkpoint '{self.config.resume_from_checkpoint}' does not exist. Starting a new training run.")
self.config.resume_from_checkpoint = None
else:
self.accelerator.print(f'Resuming from checkpoint {path}')
self.accelerator.load_state(os.path.join(self.config.project_name, path))
self.global_step = int(path.split('-')[1])
resume_global_step = self.global_step * self.config.gradient_accumulation
self.first_epoch = self.global_step // self.num_update_steps_per_epoch
self.resume_step = resume_global_step % (self.num_update_steps_per_epoch * self.config.gradient_accumulation)
def _calculate_loss(self, model_pred, noise, model_input, timesteps):
if model_pred.shape[1] == 6 and (not self.config.xl):
(model_pred, _) = torch.chunk(model_pred, 2, dim=1)
if self.noise_scheduler.config.prediction_type == 'epsilon':
target = noise
elif self.noise_scheduler.config.prediction_type == 'v_prediction':
target = self.noise_scheduler.get_velocity(model_input, noise, timesteps)
else:
raise ValueError(f'Unknown prediction type {self.noise_scheduler.config.prediction_type}')
if self.config.prior_preservation:
(model_pred, model_pred_prior) = torch.chunk(model_pred, 2, dim=0)
(target, target_prior) = torch.chunk(target, 2, dim=0)
loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction='mean')
loss = loss + self.config.prior_loss_weight * prior_loss
else:
loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
return loss
def _clip_gradients(self):
if self.accelerator.sync_gradients:
if len(self.text_lora_parameters) == 0:
params_to_clip = self.unet_lora_parameters
elif len(self.text_lora_parameters) == 1:
params_to_clip = itertools.chain(self.unet_lora_parameters, self.text_lora_parameters[0])
elif len(self.text_lora_parameters) == 2:
params_to_clip = itertools.chain(self.unet_lora_parameters, self.text_lora_parameters[0], self.text_lora_parameters[1])
else:
raise ValueError('More than 2 text encoders are not supported.')
self.accelerator.clip_grad_norm_(params_to_clip, self.config.max_grad_norm)
def _save_checkpoint(self):
if self.accelerator.is_main_process:
if self.global_step % self.config.checkpointing_steps == 0:
if self.config.checkpoints_total_limit is not None:
checkpoints = os.listdir(self.config.project_name)
checkpoints = [d for d in checkpoints if d.startswith('checkpoint')]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split('-')[1]))
if len(checkpoints) >= self.config.checkpoints_total_limit:
num_to_remove = len(checkpoints) - self.config.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(f'{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints')
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(self.config.project_name, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(self.config.project_name, f'checkpoint-{self.global_step}')
self.accelerator.save_state(save_path)
logger.info(f'Saved state to {save_path}')
def _get_model_pred(self, batch, channels, noisy_model_input, timesteps, bsz):
if self.config.xl:
elems_to_repeat = bsz // 2 if self.config.prior_preservation else bsz
if not self.config.train_text_encoder:
unet_added_conditions = {'time_ids': self.add_time_ids.repeat(elems_to_repeat, 1), 'text_embeds': self.unet_add_text_embeds.repeat(elems_to_repeat, 1)}
model_pred = self.unet(noisy_model_input, timesteps, self.prompt_embeds.repeat(elems_to_repeat, 1, 1), added_cond_kwargs=unet_added_conditions).sample
else:
unet_added_conditions = {'time_ids': self.add_time_ids.repeat(elems_to_repeat, 1)}
(prompt_embeds, pooled_prompt_embeds) = utils.encode_prompt_xl(text_encoders=self.text_encoders, tokenizers=None, prompt=None, text_input_ids_list=[self.tokens_one, self.tokens_two])
unet_added_conditions.update({'text_embeds': pooled_prompt_embeds.repeat(elems_to_repeat, 1)})
prompt_embeds = prompt_embeds.repeat(elems_to_repeat, 1, 1)
model_pred = self.unet(noisy_model_input, timesteps, prompt_embeds, added_cond_kwargs=unet_added_conditions).sample
else:
if self.config.pre_compute_text_embeddings:
encoder_hidden_states = batch['input_ids']
else:
encoder_hidden_states = utils.encode_prompt(self.text_encoder1, batch['input_ids'], batch['attention_mask'], text_encoder_use_attention_mask=self.config.text_encoder_use_attention_mask)
if self.accelerator.unwrap_model(self.unet).config.in_channels == channels * 2:
noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1)
if self.config.class_labels_conditioning == 'timesteps':
class_labels = timesteps
else:
class_labels = None
model_pred = self.unet(noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels).sample
return model_pred
def train(self):
progress_bar = tqdm(range(self.global_step, self.config.num_steps), disable=not self.accelerator.is_local_main_process)
progress_bar.set_description('Steps')
for epoch in range(self.first_epoch, self.config.epochs):
self.unet.train()
if self.config.train_text_encoder:
self.text_encoder1.train()
if self.config.xl:
self.text_encoder2.train()
for (step, batch) in enumerate(self.train_dataloader):
if self.config.resume_from_checkpoint and epoch == self.first_epoch and (step < self.resume_step):
if step % self.config.gradient_accumulation == 0:
progress_bar.update(1)
continue
with self.accelerator.accumulate(self.unet):
if self.config.xl:
pixel_values = batch['pixel_values']
else:
pixel_values = batch['pixel_values'].to(dtype=self.weight_dtype)
if self.vae is not None:
model_input = self.vae.encode(pixel_values).latent_dist.sample()
model_input = model_input * self.vae.config.scaling_factor
model_input = model_input.to(dtype=self.weight_dtype)
else:
model_input = pixel_values
noise = torch.randn_like(model_input)
(bsz, channels, height, width) = model_input.shape
timesteps = torch.randint(0, self.noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device)
timesteps = timesteps.long()
noisy_model_input = self.noise_scheduler.add_noise(model_input, noise, timesteps)
model_pred = self._get_model_pred(batch, channels, noisy_model_input, timesteps, bsz)
loss = self._calculate_loss(model_pred, noise, model_input, timesteps)
self.accelerator.backward(loss)
self._clip_gradients()
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
if self.accelerator.sync_gradients:
progress_bar.update(1)
self.global_step += 1
self._save_checkpoint()
logs = {'loss': loss.detach().item(), 'lr': self.scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
self.accelerator.log(logs, step=self.global_step)
if self.global_step >= self.config.num_steps:
break
self.accelerator.wait_for_everyone()
if self.accelerator.is_main_process:
self.unet = self.accelerator.unwrap_model(self.unet)
self.unet = self.unet.to(torch.float32)
unet_lora_layers = utils.unet_attn_processors_state_dict(self.unet)
text_encoder_lora_layers_1 = None
text_encoder_lora_layers_2 = None
if self.text_encoder1 is not None and self.config.train_text_encoder:
text_encoder1 = self.accelerator.unwrap_model(self.text_encoder1)
text_encoder1 = text_encoder1.to(torch.float32)
text_encoder_lora_layers_1 = text_encoder_lora_state_dict(text_encoder1)
if self.text_encoder2 is not None and self.config.train_text_encoder:
text_encoder2 = self.accelerator.unwrap_model(self.text_encoder2)
text_encoder2 = text_encoder2.to(torch.float32)
text_encoder_lora_layers_2 = text_encoder_lora_state_dict(text_encoder2)
if self.config.xl:
StableDiffusionXLPipeline.save_lora_weights(save_directory=self.config.project_name, unet_lora_layers=unet_lora_layers, text_encoder_lora_layers=text_encoder_lora_layers_1, text_encoder_2_lora_layers=text_encoder_lora_layers_2, safe_serialization=True)
else:
LoraLoaderMixin.save_lora_weights(save_directory=self.config.project_name, unet_lora_layers=unet_lora_layers, text_encoder_lora_layers=text_encoder_lora_layers_1, safe_serialization=True)
self.accelerator.end_training()
def push_to_hub(self):
repo_id = create_repo(repo_id=f'{self.config.username}/{self.config.project_name}', exist_ok=True, private=True, token=self.config.token).repo_id
utils.create_model_card(repo_id, base_model=self.config.model, train_text_encoder=self.config.train_text_encoder, prompt=self.config.prompt, repo_folder=self.config.project_name)
upload_folder(repo_id=repo_id, folder_path=self.config.project_name, commit_message='End of training', ignore_patterns=['step_*', 'epoch_*'], token=self.config.token)
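# Driver sketch (assumed; the components are built by the dreambooth entry point before this class is used):
#   trainer = Trainer(unet=unet, vae=vae, train_dataloader=train_dataloader, train_dataset=train_dataset,
#                     text_encoders=text_encoders, config=config, optimizer=optimizer, accelerator=accelerator,
#                     noise_scheduler=noise_scheduler, weight_dtype=weight_dtype,
#                     text_lora_parameters=text_lora_parameters, unet_lora_parameters=unet_lora_parameters,
#                     tokenizers=tokenizers)
#   trainer.train()
#   if config.push_to_hub:
#       trainer.push_to_hub()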
# File: autotrain-advanced-main/src/autotrain/trainers/dreambooth/utils.py
import os
from huggingface_hub import list_models
from autotrain import logger
VALID_IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.JPG', '.JPEG', '.PNG']
try:
XL_MODELS = [m.id for m in list(list_models(task='text-to-image', sort='downloads', limit=200, direction=-1, filter=['diffusers:StableDiffusionXLPipeline']))]
except Exception:
logger.info('Unable to reach Hugging Face Hub, using default models as XL models.')
XL_MODELS = ['stabilityai/stable-diffusion-xl-base-1.0', 'stabilityai/stable-diffusion-xl-base-0.9', 'diffusers/stable-diffusion-xl-base-1.0', 'stabilityai/sdxl-turbo']
def save_model_card_xl(repo_id: str, base_model: str = None, train_text_encoder: bool = False, instance_prompt: str = None, repo_folder: str = None, vae_path: str = None):
img_str = ''
yaml = f'\n---\ntags:\n- autotrain\n- stable-diffusion-xl\n- stable-diffusion-xl-diffusers\n- text-to-image\n- diffusers\n- lora\n- template:sd-lora\n{img_str}\nbase_model: {base_model}\ninstance_prompt: {instance_prompt}\nlicense: openrail++\n---\n '
model_card = f'\n# AutoTrain SDXL LoRA DreamBooth - {repo_id}\n\n\n\n## Model description\n\nThese are {repo_id} LoRA adaption weights for {base_model}.\n\nThe weights were trained using [DreamBooth](https://dreambooth.github.io/).\n\nLoRA for the text encoder was enabled: {train_text_encoder}.\n\nSpecial VAE used for training: {vae_path}.\n\n## Trigger words\n\nYou should use {instance_prompt} to trigger the image generation.\n\n## Download model\n\nWeights for this model are available in Safetensors format.\n\n[Download]({repo_id}/tree/main) them in the Files & versions tab.\n\n'
with open(os.path.join(repo_folder, 'README.md'), 'w') as f:
f.write(yaml + model_card)
def save_model_card(repo_id: str, base_model: str = None, train_text_encoder: bool = False, instance_prompt: str = None, repo_folder: str = None):
img_str = ''
model_description = f'\n# AutoTrain LoRA DreamBooth - {repo_id}\n\nThese are LoRA adaption weights for {base_model}. The weights were trained on {instance_prompt} using [DreamBooth](https://dreambooth.github.io/).\nLoRA for the text encoder was enabled: {train_text_encoder}.\n'
yaml = f'\n---\ntags:\n- autotrain\n- stable-diffusion\n- stable-diffusion-diffusers\n- text-to-image\n- diffusers\n- lora\n- template:sd-lora\n{img_str}\nbase_model: {base_model}\ninstance_prompt: {instance_prompt}\nlicense: openrail++\n---\n '
with open(os.path.join(repo_folder, 'README.md'), 'w') as f:
f.write(yaml + model_description)
# File: autotrain-advanced-main/src/autotrain/trainers/extractive_question_answering/__main__.py
import argparse
import copy
import json
from functools import partial
from accelerate.state import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, EarlyStoppingCallback, Trainer, TrainingArguments
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.extractive_question_answering import utils
from autotrain.trainers.extractive_question_answering.dataset import ExtractiveQuestionAnsweringDataset
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = ExtractiveQuestionAnsweringParams(**config)
train_data = None
valid_data = None
if config.train_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
logger.info(train_data)
if config.valid_split is not None:
logger.info(valid_data)
model_config = AutoConfig.from_pretrained(config.model, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token)
try:
model = AutoModelForQuestionAnswering.from_pretrained(config.model, config=model_config, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
except OSError:
model = AutoModelForQuestionAnswering.from_pretrained(config.model, config=model_config, from_tf=True, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
tokenizer = AutoTokenizer.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
use_v2 = False
if config.valid_split is not None:
id_column = list(range(len(valid_data)))
for data in valid_data:
if -1 in data[config.answer_column]['answer_start']:
use_v2 = True
break
valid_data = valid_data.add_column('id', id_column)
column_names = valid_data.column_names
partial_process = partial(utils.prepare_qa_validation_features, tokenizer=tokenizer, config=config)
processed_eval_dataset = valid_data.map(partial_process, batched=True, remove_columns=column_names, num_proc=2, desc='Running tokenizer on validation dataset')
orig_valid_data = copy.deepcopy(valid_data)
train_data = ExtractiveQuestionAnsweringDataset(data=train_data, tokenizer=tokenizer, config=config)
if config.valid_split is not None:
valid_data = ExtractiveQuestionAnsweringDataset(data=valid_data, tokenizer=tokenizer, config=config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=2 * config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False)
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=config.early_stopping_patience, early_stopping_threshold=config.early_stopping_threshold)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
callbacks_to_use.extend([UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()])
if config.valid_split is not None:
logger.info(processed_eval_dataset)
compute_metrics = partial(utils.compute_metrics, eval_dataset=processed_eval_dataset, eval_examples=orig_valid_data, config=config, use_v2=use_v2)
else:
compute_metrics = None
args = TrainingArguments(**training_args)
trainer_args = dict(args=args, model=model, callbacks=callbacks_to_use, compute_metrics=compute_metrics)
trainer = Trainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data)
trainer.remove_callback(PrinterCallback)
trainer.train()
logger.info('Finished training, saving model...')
trainer.save_model(config.project_name)
tokenizer.save_pretrained(config.project_name)
model_card = utils.create_model_card(config, trainer)
with open(f'{config.project_name}/README.md', 'w') as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
if __name__ == '__main__':
args = parse_args()
training_config = json.load(open(args.training_config))
config = ExtractiveQuestionAnsweringParams(**training_config)
train(config)
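# Invocation sketch: as a package __main__, this trainer can be launched directly, e.g.
#   python -m autotrain.trainers.extractive_question_answering --training_config params.json
# where params.json is a JSON-serialized ExtractiveQuestionAnsweringParams (see params.py below).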
# File: autotrain-advanced-main/src/autotrain/trainers/extractive_question_answering/dataset.py
from functools import partial
from autotrain import logger
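# Converts (context, question, answer-span) examples into training features: long contexts are split
# into overlapping chunks via return_overflowing_tokens/stride, and the character-level answer span is
# mapped to token-level start/end positions using the offset mapping; chunks that do not contain the
# answer point both positions at the CLS token.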
def _prepare_dataset(examples, tokenizer, config):
pad_on_right = tokenizer.padding_side == 'right'
tokenized_examples = tokenizer(examples[config.question_column if pad_on_right else config.text_column], examples[config.text_column if pad_on_right else config.question_column], truncation='only_second' if pad_on_right else 'only_first', max_length=config.max_seq_length, stride=config.max_doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding='max_length')
sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
offset_mapping = tokenized_examples.pop('offset_mapping')
tokenized_examples['start_positions'] = []
tokenized_examples['end_positions'] = []
for (i, offsets) in enumerate(offset_mapping):
input_ids = tokenized_examples['input_ids'][i]
if tokenizer.cls_token_id in input_ids:
cls_index = input_ids.index(tokenizer.cls_token_id)
elif tokenizer.bos_token_id in input_ids:
cls_index = input_ids.index(tokenizer.bos_token_id)
else:
cls_index = 0
sequence_ids = tokenized_examples.sequence_ids(i)
sample_index = sample_mapping[i]
answers = examples[config.answer_column][sample_index]
if len(answers['answer_start']) == 0:
tokenized_examples['start_positions'].append(cls_index)
tokenized_examples['end_positions'].append(cls_index)
else:
start_char = answers['answer_start'][0]
end_char = start_char + len(answers['text'][0])
token_start_index = 0
while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
token_start_index += 1
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
token_end_index -= 1
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
tokenized_examples['start_positions'].append(cls_index)
tokenized_examples['end_positions'].append(cls_index)
else:
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples['start_positions'].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples['end_positions'].append(token_end_index + 1)
return tokenized_examples
class ExtractiveQuestionAnsweringDataset:
def __init__(self, data, tokenizer, config):
self.data = data
self.tokenizer = tokenizer
self.config = config
logger.info('Processing data for Extractive QA')
mapping_function = partial(_prepare_dataset, tokenizer=self.tokenizer, config=self.config)
self.tokenized_data = self.data.map(mapping_function, batched=True, remove_columns=self.data.column_names)
def __len__(self):
return len(self.tokenized_data)
def __getitem__(self, item):
return self.tokenized_data[item]
# File: autotrain-advanced-main/src/autotrain/trainers/extractive_question_answering/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class ExtractiveQuestionAnsweringParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('bert-base-uncased', title='Model name')
lr: float = Field(5e-05, title='Learning rate')
epochs: int = Field(3, title='Number of training epochs')
max_seq_length: int = Field(128, title='Max sequence length')
max_doc_stride: int = Field(128, title='Max doc stride')
batch_size: int = Field(8, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
text_column: str = Field('context', title='context/text column')
question_column: str = Field('question', title='question column')
answer_column: str = Field('answers', title='answer column')
logging_steps: int = Field(-1, title='Logging steps')
project_name: str = Field('project-name', title='Output directory')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
save_total_limit: int = Field(1, title='Save total limit')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
username: Optional[str] = Field(None, title='Hugging Face Username')
log: str = Field('none', title='Logging using experiment tracking')
early_stopping_patience: int = Field(5, title='Early stopping patience')
early_stopping_threshold: float = Field(0.01, title='Early stopping threshold')
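# Usage sketch (placeholder values; every unset field falls back to the defaults declared above):
#   params = ExtractiveQuestionAnsweringParams(data_path='squad', model='bert-base-uncased', epochs=1, project_name='eqa-demo')
# The JSON dump of such an object is what __main__.py expects via --training_config.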
# File: autotrain-advanced-main/src/autotrain/trainers/extractive_question_answering/utils.py
import collections
import json
import os
import numpy as np
from datasets import load_metric
from transformers import EvalPrediction
from autotrain import logger
MODEL_CARD = '\n---\nlibrary_name: transformers\ntags:\n- autotrain\n- question-answering{base_model}\nwidget:\n- text: "Who loves AutoTrain?"\n context: "Everyone loves AutoTrain"{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Extractive Question Answering\n\n## Validation Metrics\n\n{validation_metrics}\n\n## Usage\n\n\n```python\nimport torch\n\nfrom transformers import AutoModelForQuestionAnswering, AutoTokenizer\n\nmodel = AutoModelForQuestionAnswering.from_pretrained(...)\n\ntokenizer = AutoTokenizer.from_pretrained(...)\n\nfrom transformers import BertTokenizer, BertForQuestionAnswering\n\nquestion, text = "Who loves AutoTrain?", "Everyone loves AutoTrain"\n\ninputs = tokenizer(question, text, return_tensors=\'pt\')\n\nstart_positions = torch.tensor([1])\n\nend_positions = torch.tensor([3])\n\noutputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)\n\nloss = outputs.loss\n\nstart_scores = outputs.start_logits\n\nend_scores = outputs.end_logits\n```\n'
SQUAD_METRIC = load_metric('squad')
SQUAD_V2_METRIC = load_metric('squad_v2')
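# Decodes raw start/end logits into text answers: features are grouped back to their source example,
# the top n_best_size start/end pairs are scored as start_logit + end_logit, invalid or over-length
# spans are discarded, and for SQuAD v2 the null answer wins when its score beats the best span by
# more than null_score_diff_threshold.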
def postprocess_qa_predictions(examples, features, predictions, config, version_2_with_negative=False, n_best_size=20, max_answer_length=30, null_score_diff_threshold=0.0, output_dir=None, prefix=None):
if len(predictions) != 2:
raise ValueError('`predictions` should be a tuple with two elements (start_logits, end_logits).')
(all_start_logits, all_end_logits) = predictions
if len(predictions[0]) != len(features):
raise ValueError(f'Got {len(predictions[0])} predictions and {len(features)} features.')
example_id_to_index = {k: i for (i, k) in enumerate(examples['id'])}
features_per_example = collections.defaultdict(list)
for (i, feature) in enumerate(features):
features_per_example[example_id_to_index[feature['example_id']]].append(i)
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
logger.info(f'Post-processing {len(examples)} example predictions split into {len(features)} features.')
for (example_index, example) in enumerate(examples):
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
for feature_index in feature_indices:
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
offset_mapping = features[feature_index]['offset_mapping']
token_is_max_context = features[feature_index].get('token_is_max_context', None)
feature_null_score = start_logits[0] + end_logits[0]
if min_null_prediction is None or min_null_prediction['score'] > feature_null_score:
min_null_prediction = {'offsets': (0, 0), 'score': feature_null_score, 'start_logit': start_logits[0], 'end_logit': end_logits[0]}
start_indexes = np.argsort(start_logits)[-1:-n_best_size - 1:-1].tolist()
end_indexes = np.argsort(end_logits)[-1:-n_best_size - 1:-1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
if start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or (len(offset_mapping[start_index]) < 2) or (offset_mapping[end_index] is None) or (len(offset_mapping[end_index]) < 2):
continue
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
if token_is_max_context is not None and (not token_is_max_context.get(str(start_index), False)):
continue
prelim_predictions.append({'offsets': (offset_mapping[start_index][0], offset_mapping[end_index][1]), 'score': start_logits[start_index] + end_logits[end_index], 'start_logit': start_logits[start_index], 'end_logit': end_logits[end_index]})
if version_2_with_negative and min_null_prediction is not None:
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction['score']
predictions = sorted(prelim_predictions, key=lambda x: x['score'], reverse=True)[:n_best_size]
if version_2_with_negative and min_null_prediction is not None and (not any((p['offsets'] == (0, 0) for p in predictions))):
predictions.append(min_null_prediction)
context = example[config.text_column]
for pred in predictions:
offsets = pred.pop('offsets')
pred['text'] = context[offsets[0]:offsets[1]]
if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]['text'] == ''):
predictions.insert(0, {'text': 'empty', 'start_logit': 0.0, 'end_logit': 0.0, 'score': 0.0})
scores = np.array([pred.pop('score') for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
for (prob, pred) in zip(probs, predictions):
pred['probability'] = prob
if not version_2_with_negative:
all_predictions[example['id']] = predictions[0]['text']
else:
i = 0
while predictions[i]['text'] == '':
i += 1
best_non_null_pred = predictions[i]
score_diff = null_score - best_non_null_pred['start_logit'] - best_non_null_pred['end_logit']
scores_diff_json[example['id']] = float(score_diff)
if score_diff > null_score_diff_threshold:
all_predictions[example['id']] = ''
else:
all_predictions[example['id']] = best_non_null_pred['text']
all_nbest_json[example['id']] = [{k: float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v for (k, v) in pred.items()} for pred in predictions]
if output_dir is not None:
if not os.path.isdir(output_dir):
raise EnvironmentError(f'{output_dir} is not a directory.')
prediction_file = os.path.join(output_dir, 'predictions.json' if prefix is None else f'{prefix}_predictions.json')
nbest_file = os.path.join(output_dir, 'nbest_predictions.json' if prefix is None else f'{prefix}_nbest_predictions.json')
if version_2_with_negative:
null_odds_file = os.path.join(output_dir, 'null_odds.json' if prefix is None else f'{prefix}_null_odds.json')
logger.info(f'Saving predictions to {prediction_file}.')
with open(prediction_file, 'w') as writer:
writer.write(json.dumps(all_predictions, indent=4) + '\n')
logger.info(f'Saving nbest_preds to {nbest_file}.')
with open(nbest_file, 'w') as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + '\n')
if version_2_with_negative:
logger.info(f'Saving null_odds to {null_odds_file}.')
with open(null_odds_file, 'w') as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + '\n')
return all_predictions
def post_processing_function_qa(examples, features, predictions, version_2_with_negative, config, stage='eval'):
predictions = postprocess_qa_predictions(examples=examples, features=features, predictions=predictions, version_2_with_negative=version_2_with_negative, n_best_size=20, max_answer_length=30, null_score_diff_threshold=0.0, output_dir=None, prefix=stage, config=config)
if version_2_with_negative:
formatted_predictions = [{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for (k, v) in predictions.items()]
else:
formatted_predictions = [{'id': k, 'prediction_text': v} for (k, v) in predictions.items()]
references = [{'id': str(ex['id']), 'answers': ex[config.answer_column]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
def compute_metrics(pred, eval_dataset, eval_examples, use_v2, config):
(preds, label_ids) = post_processing_function_qa(eval_examples, eval_dataset, pred.predictions, use_v2, config)
if use_v2:
result = SQUAD_V2_METRIC.compute(predictions=preds, references=label_ids)
else:
result = SQUAD_METRIC.compute(predictions=preds, references=label_ids)
return {k: round(v, 4) for (k, v) in result.items()}
def create_model_card(config, trainer):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
eval_scores = [f"{k[len('eval_'):]}: {v}" for (k, v) in eval_scores.items()]
eval_scores = '\n\n'.join(eval_scores)
else:
eval_scores = 'No validation metrics available'
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, validation_metrics=eval_scores, base_model=base_model)
return model_card
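# Validation features keep an example_id so overlapping chunks can be traced back to their source
# example; offsets outside the context segment are set to None so post-processing only considers
# context tokens when reconstructing answer text.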
def prepare_qa_validation_features(examples, tokenizer, config):
pad_on_right = tokenizer.padding_side == 'right'
examples[config.question_column] = [q.lstrip() for q in examples[config.question_column]]
tokenized_examples = tokenizer(examples[config.question_column if pad_on_right else config.text_column], examples[config.text_column if pad_on_right else config.question_column], truncation='only_second' if pad_on_right else 'only_first', max_length=config.max_seq_length, stride=config.max_doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding='max_length')
sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
tokenized_examples['example_id'] = []
for i in range(len(tokenized_examples['input_ids'])):
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
sample_index = sample_mapping[i]
tokenized_examples['example_id'].append(examples['id'][sample_index])
tokenized_examples['offset_mapping'][i] = [o if sequence_ids[k] == context_index else None for (k, o) in enumerate(tokenized_examples['offset_mapping'][i])]
return tokenized_examples
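# --- Editor's note: usage sketch, not part of the original file -------------------
# A minimal illustration of how the validation features above are typically built
# with datasets.map before being handed to postprocess_qa_predictions; `tokenizer`,
# `valid_data` and `config` (with question_column/text_column/max_seq_length/
# max_doc_stride) are assumed to exist as in the trainer's __main__.
#
#   from functools import partial
#   prepare_fn = partial(prepare_qa_validation_features, tokenizer=tokenizer, config=config)
#   eval_features = valid_data.map(prepare_fn, batched=True, remove_columns=valid_data.column_names)
#
# Each resulting chunk keeps an example_id and a context-masked offset_mapping so that
# overflowing chunks can be mapped back to their source examples during post-processing.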
# File: autotrain-advanced-main/src/autotrain/trainers/generic/__main__.py
import argparse
import json
from autotrain import logger
from autotrain.trainers.common import monitor, pause_space
from autotrain.trainers.generic import utils
from autotrain.trainers.generic.params import GenericParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
return parser.parse_args()
@monitor
def run(config):
if isinstance(config, dict):
config = GenericParams(**config)
logger.info('Downloading data repo...')
utils.pull_dataset_repo(config)
logger.info('Uninstalling requirements...')
utils.uninstall_requirements(config)
logger.info('Installing requirements...')
utils.install_requirements(config)
logger.info('Running command...')
utils.run_command(config)
pause_space(config)
if __name__ == '__main__':
args = parse_args()
_config = json.load(open(args.config))
_config = GenericParams(**_config)
run(_config)
# File: autotrain-advanced-main/src/autotrain/trainers/generic/params.py
from typing import Dict, Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class GenericParams(AutoTrainParams):
username: str = Field(None, title='Hugging Face Username')
project_name: str = Field('project-name', title='Project name')
data_path: str = Field(None, title='Data path')
token: str = Field(None, title='Hub Token')
script_path: str = Field(None, title='Script path')
env: Optional[Dict[str, str]] = Field(None, title='Environment Variables')
args: Optional[Dict[str, str]] = Field(None, title='Arguments')
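# --- Editor's note: usage sketch, not part of the original file -------------------
# generic/__main__.py above reads a JSON file passed via --config and validates it
# with GenericParams. A minimal, hypothetical config (all values illustrative):
EXAMPLE_GENERIC_CONFIG = {
    'username': 'my-hf-username',
    'project_name': 'my-generic-project',
    'data_path': 'my-hf-username/autotrain-my-generic-project',
    'token': 'hf_xxx',
    'script_path': '.',
    'env': {'WANDB_DISABLED': 'true'},
    'args': {'epochs': '3', 'fp16': ''},
}
# Saved as config.json, it would be run with something like:
#   python -m autotrain.trainers.generic --config config.json
# An empty-string arg value (e.g. 'fp16') is passed to script.py as a bare flag,
# as shown in run_command in generic/utils.py below.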
# File: autotrain-advanced-main/src/autotrain/trainers/generic/utils.py
import os
import subprocess
import requests
from huggingface_hub import HfApi, snapshot_download
from autotrain import logger
def create_dataset_repo(username, project_name, script_path, token):
logger.info('Creating dataset repo...')
api = HfApi(token=token)
repo_id = f'{username}/autotrain-{project_name}'
api.create_repo(repo_id=repo_id, repo_type='dataset', private=True)
logger.info('Uploading dataset...')
api.upload_folder(folder_path=script_path, repo_id=repo_id, repo_type='dataset')
logger.info('Dataset uploaded.')
return repo_id
def pull_dataset_repo(params):
snapshot_download(repo_id=params.data_path, local_dir=params.project_name, token=params.token, repo_type='dataset')
def uninstall_requirements(params):
if os.path.exists(f'{params.project_name}/requirements.txt'):
uninstall_list = []
with open(f'{params.project_name}/requirements.txt', 'r', encoding='utf-8') as f:
for line in f:
if line.startswith('-'):
uninstall_list.append(line[1:])
with open(f'{params.project_name}/uninstall.txt', 'w', encoding='utf-8') as f:
for line in uninstall_list:
f.write(line)
pipe = subprocess.Popen(['pip', 'uninstall', '-r', 'uninstall.txt', '-y'], cwd=params.project_name)
pipe.wait()
logger.info('Requirements uninstalled.')
return
def install_requirements(params):
if os.path.exists(f'{params.project_name}/requirements.txt'):
install_list = []
with open(f'{params.project_name}/requirements.txt', 'r', encoding='utf-8') as f:
for line in f:
if not line.startswith('-'):
install_list.append(line)
with open(f'{params.project_name}/requirements.txt', 'w', encoding='utf-8') as f:
for line in install_list:
f.write(line)
pipe = subprocess.Popen(['pip', 'install', '-r', 'requirements.txt'], cwd=params.project_name)
pipe.wait()
logger.info('Requirements installed.')
return
logger.info('No requirements.txt found. Skipping requirements installation.')
return
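# --- Editor's note: illustration, not part of the original file -------------------
# uninstall_requirements and install_requirements above share one requirements.txt:
# lines prefixed with '-' are stripped of the dash, collected into uninstall.txt and
# removed with `pip uninstall -r uninstall.txt -y`; all remaining lines are kept and
# installed with `pip install -r requirements.txt`. A hypothetical requirements.txt
#
#   -torchaudio
#   scikit-learn==1.4.2
#
# therefore uninstalls torchaudio and installs scikit-learn==1.4.2.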
def run_command(params):
if os.path.exists(f'{params.project_name}/script.py'):
cmd = ['python', 'script.py']
if params.args:
for arg in params.args:
cmd.append(f'--{arg}')
if params.args[arg] != '':
cmd.append(params.args[arg])
pipe = subprocess.Popen(cmd, cwd=params.project_name)
pipe.wait()
logger.info('Command finished.')
return
raise ValueError('No script.py found.')
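# Editor's note: pause_endpoint expects an ENDPOINT_ID environment variable of the form
# 'username/endpoint-name' and asks the Hugging Face Inference Endpoints API to pause
# that endpoint using the token from the params.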
def pause_endpoint(params):
endpoint_id = os.environ['ENDPOINT_ID']
username = endpoint_id.split('/')[0]
project_name = endpoint_id.split('/')[1]
api_url = f'https://api.endpoints.huggingface.cloud/v2/endpoint/{username}/{project_name}/pause'
headers = {'Authorization': f'Bearer {params.token}'}
r = requests.post(api_url, headers=headers, timeout=120)
return r.json()
# File: autotrain-advanced-main/src/autotrain/trainers/image_classification/__main__.py
import argparse
import json
from accelerate.state import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from transformers import AutoConfig, AutoImageProcessor, AutoModelForImageClassification, EarlyStoppingCallback, Trainer, TrainingArguments
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.image_classification import utils
from autotrain.trainers.image_classification.params import ImageClassificationParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = ImageClassificationParams(**config)
valid_data = None
if config.data_path == f'{config.project_name}/autotrain-data':
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
logger.info(f'Train data: {train_data}')
logger.info(f'Valid data: {valid_data}')
classes = train_data.features[config.target_column].names
logger.info(f'Classes: {classes}')
label2id = {c: i for (i, c) in enumerate(classes)}
num_classes = len(classes)
if num_classes < 2:
raise ValueError('Invalid number of classes. Must be greater than 1.')
if config.valid_split is not None:
num_classes_valid = len(valid_data.unique(config.target_column))
if num_classes_valid != num_classes:
raise ValueError(f'Number of classes in train and valid splits does not match: train has {num_classes}, valid has {num_classes_valid}')
model_config = AutoConfig.from_pretrained(config.model, num_labels=num_classes, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token)
model_config._num_labels = len(label2id)
model_config.label2id = label2id
model_config.id2label = {v: k for (k, v) in label2id.items()}
try:
model = AutoModelForImageClassification.from_pretrained(config.model, config=model_config, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
except OSError:
model = AutoModelForImageClassification.from_pretrained(config.model, config=model_config, from_tf=True, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
image_processor = AutoImageProcessor.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
(train_data, valid_data) = utils.process_data(train_data, valid_data, image_processor, config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
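# Editor's note (illustrative): with the default logging_steps of -1, the value is
# derived as 20% of one epoch's batches (from the validation split when available,
# otherwise from the training split) and clamped to the range [1, 25]. For example,
# 1000 training rows with batch_size 8 give int(0.2 * 1000 / 8) = 25 (the cap), while
# a 64-row validation split with batch_size 8 gives int(0.2 * 64 / 8) = 1.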
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=2 * config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False)
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=config.early_stopping_patience, early_stopping_threshold=config.early_stopping_threshold)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
callbacks_to_use.extend([UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()])
args = TrainingArguments(**training_args)
trainer_args = dict(args=args, model=model, callbacks=callbacks_to_use, compute_metrics=utils._binary_classification_metrics if num_classes == 2 else utils._multi_class_classification_metrics)
trainer = Trainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data)
trainer.remove_callback(PrinterCallback)
trainer.train()
logger.info('Finished training, saving model...')
trainer.save_model(config.project_name)
image_processor.save_pretrained(config.project_name)
model_card = utils.create_model_card(config, trainer, num_classes)
with open(f'{config.project_name}/README.md', 'w') as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
if __name__ == '__main__':
_args = parse_args()
training_config = json.load(open(_args.training_config))
_config = ImageClassificationParams(**training_config)
train(_config)
# File: autotrain-advanced-main/src/autotrain/trainers/image_classification/dataset.py
import numpy as np
import torch
class ImageClassificationDataset:
def __init__(self, data, transforms, config):
self.data = data
self.transforms = transforms
self.config = config
def __len__(self):
return len(self.data)
def __getitem__(self, item):
image = self.data[item][self.config.image_column]
target = int(self.data[item][self.config.target_column])
image = self.transforms(image=np.array(image.convert('RGB')))['image']
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
return {'pixel_values': torch.tensor(image, dtype=torch.float), 'labels': torch.tensor(target, dtype=torch.long)}
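# --- Editor's note: usage sketch, not part of the original file -------------------
# Assuming a Hugging Face image dataset split, an albumentations pipeline and a config
# with image_column/target_column (as in ImageClassificationParams), each item is
# returned as Trainer-ready tensors:
#
#   ds = ImageClassificationDataset(train_split, train_transforms, config)
#   sample = ds[0]
#   sample['pixel_values']  # float tensor of shape (3, H, W), channels first
#   sample['labels']        # scalar long tensor holding the class index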
# File: autotrain-advanced-main/src/autotrain/trainers/image_classification/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class ImageClassificationParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('google/vit-base-patch16-224', title='Model name')
username: Optional[str] = Field(None, title='Hugging Face Username')
lr: float = Field(5e-05, title='Learning rate')
epochs: int = Field(3, title='Number of training epochs')
batch_size: int = Field(8, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
logging_steps: int = Field(-1, title='Logging steps')
project_name: str = Field('project-name', title='Output directory')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
save_total_limit: int = Field(1, title='Save total limit')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
image_column: str = Field('image', title='Image column')
target_column: str = Field('target', title='Target column')
log: str = Field('none', title='Logging using experiment tracking')
early_stopping_patience: int = Field(5, title='Early stopping patience')
early_stopping_threshold: float = Field(0.01, title='Early stopping threshold')
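# --- Editor's note: usage sketch, not part of the original file -------------------
# image_classification/__main__.py above is driven by a JSON file passed as
# --training_config and validated by this class. A minimal, hypothetical example;
# omitted fields fall back to the defaults declared above:
EXAMPLE_IMAGE_CLASSIFICATION_CONFIG = {
    'data_path': 'my-hf-username/my-image-dataset',
    'model': 'google/vit-base-patch16-224',
    'project_name': 'my-image-classifier',
    'train_split': 'train',
    'valid_split': 'validation',
    'image_column': 'image',
    'target_column': 'target',
    'epochs': 3,
    'batch_size': 8,
    'lr': 5e-05,
    'push_to_hub': False,
}
# Saved as config.json, it would be run with something like:
#   python -m autotrain.trainers.image_classification --training_config config.json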
# File: autotrain-advanced-main/src/autotrain/trainers/image_classification/utils.py
import os
import albumentations as A
import numpy as np
from sklearn import metrics
from autotrain.trainers.image_classification.dataset import ImageClassificationDataset
BINARY_CLASSIFICATION_EVAL_METRICS = ('eval_loss', 'eval_accuracy', 'eval_f1', 'eval_auc', 'eval_precision', 'eval_recall')
MULTI_CLASS_CLASSIFICATION_EVAL_METRICS = ('eval_loss', 'eval_accuracy', 'eval_f1_macro', 'eval_f1_micro', 'eval_f1_weighted', 'eval_precision_macro', 'eval_precision_micro', 'eval_precision_weighted', 'eval_recall_macro', 'eval_recall_micro', 'eval_recall_weighted')
MODEL_CARD = '\n---\ntags:\n- autotrain\n- image-classification{base_model}\nwidget:\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg\n example_title: Tiger\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg\n example_title: Teapot\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg\n example_title: Palace{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Image Classification\n\n## Validation Metrics\n{validation_metrics}\n'
def _binary_classification_metrics(pred):
(raw_predictions, labels) = pred
predictions = np.argmax(raw_predictions, axis=1)
result = {'f1': metrics.f1_score(labels, predictions), 'precision': metrics.precision_score(labels, predictions), 'recall': metrics.recall_score(labels, predictions), 'auc': metrics.roc_auc_score(labels, raw_predictions[:, 1]), 'accuracy': metrics.accuracy_score(labels, predictions)}
return result
def _multi_class_classification_metrics(pred):
(raw_predictions, labels) = pred
predictions = np.argmax(raw_predictions, axis=1)
results = {'f1_macro': metrics.f1_score(labels, predictions, average='macro'), 'f1_micro': metrics.f1_score(labels, predictions, average='micro'), 'f1_weighted': metrics.f1_score(labels, predictions, average='weighted'), 'precision_macro': metrics.precision_score(labels, predictions, average='macro'), 'precision_micro': metrics.precision_score(labels, predictions, average='micro'), 'precision_weighted': metrics.precision_score(labels, predictions, average='weighted'), 'recall_macro': metrics.recall_score(labels, predictions, average='macro'), 'recall_micro': metrics.recall_score(labels, predictions, average='micro'), 'recall_weighted': metrics.recall_score(labels, predictions, average='weighted'), 'accuracy': metrics.accuracy_score(labels, predictions)}
return results
def process_data(train_data, valid_data, image_processor, config):
if 'shortest_edge' in image_processor.size:
size = image_processor.size['shortest_edge']
else:
size = (image_processor.size['height'], image_processor.size['width'])
try:
(height, width) = size
except TypeError:
height = size
width = size
train_transforms = A.Compose([A.RandomResizedCrop(height=height, width=width), A.RandomRotate90(), A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.2), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std)])
val_transforms = A.Compose([A.Resize(height=height, width=width), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std)])
train_data = ImageClassificationDataset(train_data, train_transforms, config)
if valid_data is not None:
valid_data = ImageClassificationDataset(valid_data, val_transforms, config)
return (train_data, valid_data)
return (train_data, None)
def create_model_card(config, trainer, num_classes):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
valid_metrics = BINARY_CLASSIFICATION_EVAL_METRICS if num_classes == 2 else MULTI_CLASS_CLASSIFICATION_EVAL_METRICS
eval_scores = [f"{k[len('eval_'):]}: {v}" for (k, v) in eval_scores.items() if k in valid_metrics]
eval_scores = '\n\n'.join(eval_scores)
else:
eval_scores = 'No validation metrics available'
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, validation_metrics=eval_scores, base_model=base_model)
return model_card
# File: autotrain-advanced-main/src/autotrain/trainers/image_regression/__main__.py
import argparse
import json
from accelerate.state import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from transformers import AutoConfig, AutoImageProcessor, AutoModelForImageClassification, EarlyStoppingCallback, Trainer, TrainingArguments
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.image_regression import utils
from autotrain.trainers.image_regression.params import ImageRegressionParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = ImageRegressionParams(**config)
valid_data = None
if config.data_path == f'{config.project_name}/autotrain-data':
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
logger.info(f'Train data: {train_data}')
logger.info(f'Valid data: {valid_data}')
model_config = AutoConfig.from_pretrained(config.model, num_labels=1, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token)
model_config._num_labels = 1
label2id = {'target': 0}
model_config.label2id = label2id
model_config.id2label = {v: k for (k, v) in label2id.items()}
try:
model = AutoModelForImageClassification.from_pretrained(config.model, config=model_config, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
except OSError:
model = AutoModelForImageClassification.from_pretrained(config.model, config=model_config, from_tf=True, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
image_processor = AutoImageProcessor.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
(train_data, valid_data) = utils.process_data(train_data, valid_data, image_processor, config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=2 * config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False)
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=config.early_stopping_patience, early_stopping_threshold=config.early_stopping_threshold)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
callbacks_to_use.extend([UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()])
args = TrainingArguments(**training_args)
trainer_args = dict(args=args, model=model, callbacks=callbacks_to_use, compute_metrics=utils.image_regression_metrics)
trainer = Trainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data)
trainer.remove_callback(PrinterCallback)
trainer.train()
logger.info('Finished training, saving model...')
trainer.save_model(config.project_name)
image_processor.save_pretrained(config.project_name)
model_card = utils.create_model_card(config, trainer)
with open(f'{config.project_name}/README.md', 'w') as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
if __name__ == '__main__':
_args = parse_args()
training_config = json.load(open(_args.training_config))
_config = ImageRegressionParams(**training_config)
train(_config)
# File: autotrain-advanced-main/src/autotrain/trainers/image_regression/dataset.py
import numpy as np
import torch
class ImageRegressionDataset:
def __init__(self, data, transforms, config):
self.data = data
self.transforms = transforms
self.config = config
def __len__(self):
return len(self.data)
def __getitem__(self, item):
image = self.data[item][self.config.image_column]
target = self.data[item][self.config.target_column]
image = self.transforms(image=np.array(image.convert('RGB')))['image']
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
return {'pixel_values': torch.tensor(image, dtype=torch.float), 'labels': torch.tensor(target, dtype=torch.float)}
# File: autotrain-advanced-main/src/autotrain/trainers/image_regression/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class ImageRegressionParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('google/vit-base-patch16-224', title='Model name')
username: Optional[str] = Field(None, title='Hugging Face Username')
lr: float = Field(5e-05, title='Learning rate')
epochs: int = Field(3, title='Number of training epochs')
batch_size: int = Field(8, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
logging_steps: int = Field(-1, title='Logging steps')
project_name: str = Field('project-name', title='Output directory')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
save_total_limit: int = Field(1, title='Save total limit')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
image_column: str = Field('image', title='Image column')
target_column: str = Field('target', title='Target column')
log: str = Field('none', title='Logging using experiment tracking')
early_stopping_patience: int = Field(5, title='Early stopping patience')
early_stopping_threshold: float = Field(0.01, title='Early stopping threshold')
# File: autotrain-advanced-main/src/autotrain/trainers/image_regression/utils.py
import os
import albumentations as A
import numpy as np
from sklearn import metrics
from autotrain.trainers.image_regression.dataset import ImageRegressionDataset
VALID_METRICS = ['eval_loss', 'eval_mse', 'eval_mae', 'eval_r2', 'eval_rmse', 'eval_explained_variance']
MODEL_CARD = '\n---\ntags:\n- autotrain\n- vision\n- image-classification\n- image-regression{base_model}\nwidget:\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg\n example_title: Tiger\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg\n example_title: Teapot\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg\n example_title: Palace{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Image Regression\n\n## Validation Metrics\n\n{validation_metrics}\n'
def image_regression_metrics(pred):
(raw_predictions, labels) = pred
try:
raw_predictions = [r for preds in raw_predictions for r in preds]
except TypeError as err:
if 'numpy.float32' not in str(err):
raise Exception(err)
pred_dict = {}
metrics_to_calculate = {'mse': metrics.mean_squared_error, 'mae': metrics.mean_absolute_error, 'r2': metrics.r2_score, 'rmse': lambda y_true, y_pred: np.sqrt(metrics.mean_squared_error(y_true, y_pred)), 'explained_variance': metrics.explained_variance_score}
for (key, func) in metrics_to_calculate.items():
try:
pred_dict[key] = float(func(labels, raw_predictions))
except Exception:
pred_dict[key] = -999
return pred_dict
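# Editor's note (illustrative): the flattening above turns Trainer outputs of shape
# (N, 1) into a flat list before scoring. For raw_predictions [[2.5], [4.0]] and
# labels [2.0, 5.0] this yields mse 0.625, mae 0.75 and rmse ~0.7906; any metric that
# cannot be computed is reported as -999 instead of raising.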
def process_data(train_data, valid_data, image_processor, config):
if 'shortest_edge' in image_processor.size:
size = image_processor.size['shortest_edge']
else:
size = (image_processor.size['height'], image_processor.size['width'])
try:
(height, width) = size
except TypeError:
height = size
width = size
train_transforms = A.Compose([A.RandomResizedCrop(height=height, width=width), A.RandomRotate90(), A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.2), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std)])
val_transforms = A.Compose([A.Resize(height=height, width=width), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std)])
train_data = ImageRegressionDataset(train_data, train_transforms, config)
if valid_data is not None:
valid_data = ImageRegressionDataset(valid_data, val_transforms, config)
return (train_data, valid_data)
return (train_data, None)
def create_model_card(config, trainer):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
eval_scores = [f"{k[len('eval_'):]}: {v}" for (k, v) in eval_scores.items() if k in VALID_METRICS]
eval_scores = '\n\n'.join(eval_scores)
else:
eval_scores = 'No validation metrics available'
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, validation_metrics=eval_scores, base_model=base_model)
return model_card
# File: autotrain-advanced-main/src/autotrain/trainers/object_detection/__main__.py
import argparse
import json
from functools import partial
from accelerate.state import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from transformers import AutoConfig, AutoImageProcessor, AutoModelForObjectDetection, EarlyStoppingCallback, Trainer, TrainingArguments
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.object_detection import utils
from autotrain.trainers.object_detection.params import ObjectDetectionParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = ObjectDetectionParams(**config)
valid_data = None
if config.data_path == f'{config.project_name}/autotrain-data':
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
logger.info(f'Train data: {train_data}')
logger.info(f'Valid data: {valid_data}')
categories = train_data.features[config.objects_column].feature['category'].names
id2label = dict(enumerate(categories))
label2id = {v: k for (k, v) in id2label.items()}
model_config = AutoConfig.from_pretrained(config.model, label2id=label2id, id2label=id2label, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token)
try:
model = AutoModelForObjectDetection.from_pretrained(config.model, config=model_config, ignore_mismatched_sizes=True, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token)
except OSError:
model = AutoModelForObjectDetection.from_pretrained(config.model, config=model_config, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True, from_tf=True)
image_processor = AutoImageProcessor.from_pretrained(config.model, token=config.token, do_pad=False, do_resize=False, size={'longest_edge': config.image_square_size}, trust_remote_code=ALLOW_REMOTE_CODE)
(train_data, valid_data) = utils.process_data(train_data, valid_data, image_processor, config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=2 * config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False)
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
if config.valid_split is not None:
training_args['eval_do_concat_batches'] = False
early_stop = EarlyStoppingCallback(early_stopping_patience=config.early_stopping_patience, early_stopping_threshold=config.early_stopping_threshold)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
callbacks_to_use.extend([UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()])
_compute_metrics_fn = partial(utils.object_detection_metrics, image_processor=image_processor, id2label=id2label, threshold=0.0)
args = TrainingArguments(**training_args)
trainer_args = dict(args=args, model=model, callbacks=callbacks_to_use, data_collator=utils.collate_fn, tokenizer=image_processor, compute_metrics=_compute_metrics_fn)
trainer = Trainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data)
trainer.remove_callback(PrinterCallback)
trainer.train()
logger.info('Finished training, saving model...')
trainer.save_model(config.project_name)
image_processor.save_pretrained(config.project_name)
model_card = utils.create_model_card(config, trainer)
with open(f'{config.project_name}/README.md', 'w') as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
if __name__ == '__main__':
_args = parse_args()
training_config = json.load(open(_args.training_config))
_config = ObjectDetectionParams(**training_config)
train(_config)
# File: autotrain-advanced-main/src/autotrain/trainers/object_detection/dataset.py
import numpy as np
class ObjectDetectionDataset:
def __init__(self, data, transforms, image_processor, config):
self.data = data
self.transforms = transforms
self.image_processor = image_processor
self.config = config
def __len__(self):
return len(self.data)
def __getitem__(self, item):
image = self.data[item][self.config.image_column]
objects = self.data[item][self.config.objects_column]
output = self.transforms(image=np.array(image.convert('RGB')), bboxes=objects['bbox'], category=objects['category'])
image = output['image']
annotations = []
for j in range(len(output['bboxes'])):
annotations.append({'image_id': str(item), 'category_id': output['category'][j], 'iscrowd': 0, 'area': objects['bbox'][j][2] * objects['bbox'][j][3], 'bbox': output['bboxes'][j]})
annotations = {'annotations': annotations, 'image_id': str(item)}
result = self.image_processor(images=image, annotations=annotations, return_tensors='pt')
result['pixel_values'] = result['pixel_values'][0]
result['labels'] = result['labels'][0]
return result
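# --- Editor's note: illustration, not part of the original file -------------------
# The objects column is expected in COCO style: each bbox is [x_min, y_min, width,
# height] in absolute pixels with a parallel 'category' list, which is why the area
# of box j above is bbox[j][2] * bbox[j][3]. The albumentations pipelines built in
# utils below are configured with bbox format 'coco' to match, and the image
# processor converts the transformed boxes into the model's 'labels' format.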
# File: autotrain-advanced-main/src/autotrain/trainers/object_detection/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class ObjectDetectionParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('google/vit-base-patch16-224', title='Model name')
username: Optional[str] = Field(None, title='Hugging Face Username')
lr: float = Field(5e-05, title='Learning rate')
epochs: int = Field(3, title='Number of training epochs')
batch_size: int = Field(8, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
logging_steps: int = Field(-1, title='Logging steps')
project_name: str = Field('project-name', title='Output directory')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
save_total_limit: int = Field(1, title='Save total limit')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
image_column: str = Field('image', title='Image column')
objects_column: str = Field('objects', title='Objects column')
log: str = Field('none', title='Logging using experiment tracking')
image_square_size: Optional[int] = Field(600, title='Longest image edge is resized to this value, then the image is padded to a square.')
early_stopping_patience: int = Field(5, title='Early stopping patience')
early_stopping_threshold: float = Field(0.01, title='Early stopping threshold')
# File: autotrain-advanced-main/src/autotrain/trainers/object_detection/utils.py
import os
from dataclasses import dataclass
import albumentations as A
import torch
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from transformers.image_transforms import center_to_corners_format
from autotrain.trainers.object_detection.dataset import ObjectDetectionDataset
VALID_METRICS = ('eval_loss', 'eval_map', 'eval_map_50', 'eval_map_75', 'eval_map_small', 'eval_map_medium', 'eval_map_large', 'eval_mar_1', 'eval_mar_10', 'eval_mar_100', 'eval_mar_small', 'eval_mar_medium', 'eval_mar_large')
MODEL_CARD = '\n---\ntags:\n- autotrain\n- object-detection\n- vision{base_model}\nwidget:\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg\n example_title: Tiger\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg\n example_title: Teapot\n- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg\n example_title: Palace{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Object Detection\n\n## Validation Metrics\n{validation_metrics}\n'
def collate_fn(batch):
data = {}
data['pixel_values'] = torch.stack([x['pixel_values'] for x in batch])
data['labels'] = [x['labels'] for x in batch]
if 'pixel_mask' in batch[0]:
data['pixel_mask'] = torch.stack([x['pixel_mask'] for x in batch])
return data
def process_data(train_data, valid_data, image_processor, config):
max_size = image_processor.size['longest_edge']
basic_transforms = [A.LongestMaxSize(max_size=max_size), A.PadIfNeeded(max_size, max_size, border_mode=0, value=(128, 128, 128), position='top_left')]
train_transforms = A.Compose([A.Compose([A.SmallestMaxSize(max_size=max_size, p=1.0), A.RandomSizedBBoxSafeCrop(height=max_size, width=max_size, p=1.0)], p=0.2), A.OneOf([A.Blur(blur_limit=7, p=0.5), A.MotionBlur(blur_limit=7, p=0.5), A.Defocus(radius=(1, 5), alias_blur=(0.1, 0.25), p=0.1)], p=0.1), A.Perspective(p=0.1), A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.5), A.HueSaturationValue(p=0.1), *basic_transforms], bbox_params=A.BboxParams(format='coco', label_fields=['category'], clip=True, min_area=25))
val_transforms = A.Compose(basic_transforms, bbox_params=A.BboxParams(format='coco', label_fields=['category'], clip=True))
train_data = ObjectDetectionDataset(train_data, train_transforms, image_processor, config)
if valid_data is not None:
valid_data = ObjectDetectionDataset(valid_data, val_transforms, image_processor, config)
return (train_data, valid_data)
return (train_data, None)
def convert_bbox_yolo_to_pascal(boxes, image_size):
boxes = center_to_corners_format(boxes)
(height, width) = image_size
boxes = boxes * torch.tensor([[width, height, width, height]])
return boxes
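# Editor's note (illustrative): predicted boxes arrive in normalized YOLO form
# (cx, cy, w, h). For an image of height 100 and width 200, the box (0.5, 0.5, 0.2, 0.4)
# becomes corners (0.4, 0.3, 0.6, 0.7) after center_to_corners_format and is then
# scaled to absolute Pascal VOC pixels (80.0, 30.0, 120.0, 70.0).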
@torch.no_grad()
def object_detection_metrics(evaluation_results, image_processor, threshold=0.0, id2label=None):
@dataclass
class ModelOutput:
logits: torch.Tensor
pred_boxes: torch.Tensor
(predictions, targets) = (evaluation_results.predictions, evaluation_results.label_ids)
image_sizes = []
post_processed_targets = []
post_processed_predictions = []
for batch in targets:
batch_image_sizes = torch.tensor([x['orig_size'] for x in batch])
image_sizes.append(batch_image_sizes)
for image_target in batch:
boxes = torch.tensor(image_target['boxes'])
boxes = convert_bbox_yolo_to_pascal(boxes, image_target['orig_size'])
labels = torch.tensor(image_target['class_labels'])
post_processed_targets.append({'boxes': boxes, 'labels': labels})
for (batch, target_sizes) in zip(predictions, image_sizes):
(batch_logits, batch_boxes) = (batch[1], batch[2])
output = ModelOutput(logits=torch.tensor(batch_logits), pred_boxes=torch.tensor(batch_boxes))
post_processed_output = image_processor.post_process_object_detection(output, threshold=threshold, target_sizes=target_sizes)
post_processed_predictions.extend(post_processed_output)
metric = MeanAveragePrecision(box_format='xyxy', class_metrics=True)
metric.update(post_processed_predictions, post_processed_targets)
metrics = metric.compute()
classes = metrics.pop('classes')
try:
len(classes)
calc_map_per_class = True
except TypeError:
calc_map_per_class = False
if calc_map_per_class:
map_per_class = metrics.pop('map_per_class')
mar_100_per_class = metrics.pop('mar_100_per_class')
for (class_id, class_map, class_mar) in zip(classes, map_per_class, mar_100_per_class):
class_name = id2label[class_id.item()] if id2label is not None else class_id.item()
metrics[f'map_{class_name}'] = class_map
metrics[f'mar_100_{class_name}'] = class_mar
metrics = {k: round(v.item(), 4) for (k, v) in metrics.items()}
return metrics
def create_model_card(config, trainer):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
eval_scores = [f"{k[len('eval_'):]}: {v}" for (k, v) in eval_scores.items() if k in VALID_METRICS]
eval_scores = '\n\n'.join(eval_scores)
else:
eval_scores = 'No validation metrics available'
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, validation_metrics=eval_scores, base_model=base_model)
return model_card
# File: autotrain-advanced-main/src/autotrain/trainers/sent_transformers/__main__.py
import argparse
import json
from functools import partial
from accelerate import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, TripletEvaluator
from sentence_transformers.losses import CoSENTLoss, MultipleNegativesRankingLoss, SoftmaxLoss
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from transformers import EarlyStoppingCallback
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.sent_transformers import utils
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = SentenceTransformersParams(**config)
train_data = None
valid_data = None
if config.train_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
num_classes = None
if config.trainer == 'pair_class':
classes = train_data.features[config.target_column].names
num_classes = len(classes)
if num_classes < 2:
raise ValueError('Invalid number of classes. Must be greater than 1.')
if config.valid_split is not None:
num_classes_valid = len(valid_data.unique(config.target_column))
if num_classes_valid != num_classes:
raise ValueError(f'Number of classes in train and valid splits does not match: train has {num_classes}, valid has {num_classes_valid}')
if config.logging_steps == -1:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
train_data = utils.process_columns(train_data, config)
logger.info(f'Train data: {train_data}')
if config.valid_split is not None:
valid_data = utils.process_columns(valid_data, config)
logger.info(f'Valid data: {valid_data}')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=2 * config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False)
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=config.early_stopping_patience, early_stopping_threshold=config.early_stopping_threshold)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
callbacks_to_use.extend([UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()])
model = SentenceTransformer(config.model, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, model_kwargs={'ignore_mismatched_sizes': True})
loss_mapping = {'pair': MultipleNegativesRankingLoss, 'pair_class': partial(SoftmaxLoss, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=num_classes), 'pair_score': CoSENTLoss, 'triplet': MultipleNegativesRankingLoss, 'qa': MultipleNegativesRankingLoss}
evaluator = None
if config.valid_split is not None:
if config.trainer == 'pair_score':
evaluator = EmbeddingSimilarityEvaluator(sentences1=valid_data['sentence1'], sentences2=valid_data['sentence2'], scores=valid_data['score'], name=config.valid_split)
elif config.trainer == 'triplet':
evaluator = TripletEvaluator(anchors=valid_data['anchor'], positives=valid_data['positive'], negatives=valid_data['negative'])
logger.info('Setting up training arguments...')
args = SentenceTransformerTrainingArguments(**training_args)
trainer_args = dict(args=args, model=model, callbacks=callbacks_to_use)
logger.info('Setting up trainer...')
trainer = SentenceTransformerTrainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data, loss=loss_mapping[config.trainer], evaluator=evaluator)
trainer.remove_callback(PrinterCallback)
logger.info('Starting training...')
trainer.train()
logger.info('Finished training, saving model...')
trainer.save_model(config.project_name)
model_card = utils.create_model_card(config, trainer)
with open(f'{config.project_name}/README.md', 'w') as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
if __name__ == '__main__':
_args = parse_args()
training_config = json.load(open(_args.training_config))
_config = SentenceTransformersParams(**training_config)
train(_config)
# File: autotrain-advanced-main/src/autotrain/trainers/sent_transformers/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class SentenceTransformersParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('microsoft/mpnet-base', title='Model name')
lr: float = Field(3e-05, title='Learning rate')
epochs: int = Field(3, title='Number of training epochs')
max_seq_length: int = Field(128, title='Max sequence length')
batch_size: int = Field(8, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
logging_steps: int = Field(-1, title='Logging steps')
project_name: str = Field('project-name', title='Output directory')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
save_total_limit: int = Field(1, title='Save total limit')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
username: Optional[str] = Field(None, title='Hugging Face Username')
log: str = Field('none', title='Logging using experiment tracking')
early_stopping_patience: int = Field(5, title='Early stopping patience')
early_stopping_threshold: float = Field(0.01, title='Early stopping threshold')
trainer: str = Field('pair_score', title='Trainer name')
sentence1_column: str = Field('sentence1', title='Sentence 1 column')
sentence2_column: str = Field('sentence2', title='Sentence 2 column')
sentence3_column: Optional[str] = Field('sentence3', title='Sentence 3 column')
target_column: Optional[str] = Field('target', title='Target column')
# File: autotrain-advanced-main/src/autotrain/trainers/sent_transformers/utils.py
import os
from autotrain import logger
MODEL_CARD = '\n---\nlibrary_name: sentence-transformers\ntags:\n- sentence-transformers\n- sentence-similarity\n- feature-extraction\n- autotrain{base_model}\nwidget:\n- source_sentence: \'search_query: i love autotrain\'\n sentences:\n - \'search_query: huggingface auto train\'\n - \'search_query: hugging face auto train\'\n - \'search_query: i love autotrain\'\npipeline_tag: sentence-similarity{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Sentence Transformers\n\n## Validation Metrics\n{validation_metrics}\n\n## Usage\n\n### Direct Usage (Sentence Transformers)\n\nFirst install the Sentence Transformers library:\n\n```bash\npip install -U sentence-transformers\n```\n\nThen you can load this model and run inference.\n```python\nfrom sentence_transformers import SentenceTransformer\n\n# Download from the Hugging Face Hub\nmodel = SentenceTransformer("sentence_transformers_model_id")\n# Run inference\nsentences = [\n \'search_query: autotrain\',\n \'search_query: auto train\',\n \'search_query: i love autotrain\',\n]\nembeddings = model.encode(sentences)\nprint(embeddings.shape)\n\n# Get the similarity scores for the embeddings\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n```\n'
def process_columns(data, config):
if config.trainer == 'pair':
if not (config.sentence1_column == 'anchor' and config.sentence1_column in data.column_names):
data = data.rename_column(config.sentence1_column, 'anchor')
if not (config.sentence2_column == 'positive' and config.sentence2_column in data.column_names):
data = data.rename_column(config.sentence2_column, 'positive')
elif config.trainer == 'pair_class':
if not (config.sentence1_column == 'premise' and config.sentence1_column in data.column_names):
data = data.rename_column(config.sentence1_column, 'premise')
if not (config.sentence2_column == 'hypothesis' and config.sentence2_column in data.column_names):
data = data.rename_column(config.sentence2_column, 'hypothesis')
if not (config.target_column == 'label' and config.target_column in data.column_names):
data = data.rename_column(config.target_column, 'label')
elif config.trainer == 'pair_score':
if not (config.sentence1_column == 'sentence1' and config.sentence1_column in data.column_names):
data = data.rename_column(config.sentence1_column, 'sentence1')
if not (config.sentence2_column == 'sentence2' and config.sentence2_column in data.column_names):
data = data.rename_column(config.sentence2_column, 'sentence2')
if not (config.target_column == 'score' and config.target_column in data.column_names):
data = data.rename_column(config.target_column, 'score')
elif config.trainer == 'triplet':
if not (config.sentence1_column == 'anchor' and config.sentence1_column in data.column_names):
data = data.rename_column(config.sentence1_column, 'anchor')
if not (config.sentence2_column == 'positive' and config.sentence2_column in data.column_names):
data = data.rename_column(config.sentence2_column, 'positive')
if not (config.sentence3_column == 'negative' and config.sentence3_column in data.column_names):
data = data.rename_column(config.sentence3_column, 'negative')
elif config.trainer == 'qa':
if not (config.sentence1_column == 'query' and config.sentence1_column in data.column_names):
data = data.rename_column(config.sentence1_column, 'query')
if not (config.sentence2_column == 'answer' and config.sentence2_column in data.column_names):
data = data.rename_column(config.sentence2_column, 'answer')
else:
raise ValueError(f'Invalid trainer: {config.trainer}')
return data
def create_model_card(config, trainer):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
logger.info(eval_scores)
eval_scores = [f"{k[len('eval_'):]}: {v}" for (k, v) in eval_scores.items()]
eval_scores = '\n\n'.join(eval_scores)
else:
eval_scores = 'No validation metrics available'
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, validation_metrics=eval_scores, base_model=base_model)
return model_card
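# Editor's addition: a minimal, hypothetical usage sketch for process_columns, not part of
# the original module. The toy dataset and its column names are assumptions for illustration.
if __name__ == '__main__':
    from types import SimpleNamespace

    from datasets import Dataset

    toy = Dataset.from_dict({'text_a': ['good movie'], 'text_b': ['great film'], 'similarity': [0.9]})
    cfg = SimpleNamespace(trainer='pair_score', sentence1_column='text_a', sentence2_column='text_b', sentence3_column=None, target_column='similarity')
    # columns are renamed to the names the 'pair_score' trainer expects
    print(process_columns(toy, cfg).column_names)  # expected: ['sentence1', 'sentence2', 'score']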
# File: autotrain-advanced-main/src/autotrain/trainers/seq2seq/__main__.py
import argparse
import json
from functools import partial
import torch
from accelerate.state import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, BitsAndBytesConfig, DataCollatorForSeq2Seq, EarlyStoppingCallback, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.seq2seq import utils
from autotrain.trainers.seq2seq.dataset import Seq2SeqDataset
from autotrain.trainers.seq2seq.params import Seq2SeqParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = Seq2SeqParams(**config)
train_data = None
valid_data = None
if config.train_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
tokenizer = AutoTokenizer.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
train_data = Seq2SeqDataset(data=train_data, tokenizer=tokenizer, config=config)
if config.valid_split is not None:
valid_data = Seq2SeqDataset(data=valid_data, tokenizer=tokenizer, config=config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=2 * config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False, predict_with_generate=True, seed=config.seed)
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=config.early_stopping_patience, early_stopping_threshold=config.early_stopping_threshold)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
callbacks_to_use.extend([UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()])
args = Seq2SeqTrainingArguments(**training_args)
model_config = AutoConfig.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_cache=False)
if config.peft:
if config.quantization == 'int4':
raise NotImplementedError('int4 quantization is not supported')
if config.quantization == 'int8':
bnb_config = BitsAndBytesConfig(load_in_8bit=True)
else:
bnb_config = None
model = AutoModelForSeq2SeqLM.from_pretrained(config.model, config=model_config, token=config.token, quantization_config=bnb_config, trust_remote_code=ALLOW_REMOTE_CODE)
else:
model = AutoModelForSeq2SeqLM.from_pretrained(config.model, config=model_config, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
embedding_size = model.get_input_embeddings().weight.shape[0]
if len(tokenizer) > embedding_size:
model.resize_token_embeddings(len(tokenizer))
if config.peft:
target_modules = config.target_modules.split(',') if config.target_modules is not None else None
if target_modules:
target_modules = [module.strip() for module in target_modules]
if len(target_modules) == 1 and target_modules[0] == 'all-linear':
target_modules = 'all-linear'
lora_config = LoraConfig(r=config.lora_r, lora_alpha=config.lora_alpha, target_modules=target_modules, lora_dropout=config.lora_dropout, bias='none', task_type=TaskType.SEQ_2_SEQ_LM)
if config.quantization is not None:
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, lora_config)
_s2s_metrics = partial(utils._seq2seq_metrics, tokenizer=tokenizer)
trainer_args = dict(args=args, model=model, callbacks=callbacks_to_use, compute_metrics=_s2s_metrics)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
trainer = Seq2SeqTrainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data, data_collator=data_collator, tokenizer=tokenizer)
for (name, module) in trainer.model.named_modules():
if 'norm' in name:
module = module.to(torch.float32)
trainer.remove_callback(PrinterCallback)
trainer.train()
logger.info('Finished training, saving model...')
trainer.model.config.use_cache = True
trainer.save_model(config.project_name)
model_card = utils.create_model_card(config, trainer)
with open(f'{config.project_name}/README.md', 'w', encoding='utf-8') as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
if __name__ == '__main__':
_args = parse_args()
training_config = json.load(open(_args.training_config))
config = Seq2SeqParams(**training_config)
train(config)
# File: autotrain-advanced-main/src/autotrain/trainers/seq2seq/dataset.py
class Seq2SeqDataset:
def __init__(self, data, tokenizer, config):
self.data = data
self.tokenizer = tokenizer
self.config = config
self.max_len_input = self.config.max_seq_length
self.max_len_target = self.config.max_target_length
def __len__(self):
return len(self.data)
def __getitem__(self, item):
text = str(self.data[item][self.config.text_column])
target = str(self.data[item][self.config.target_column])
model_inputs = self.tokenizer(text, max_length=self.max_len_input, truncation=True)
labels = self.tokenizer(text_target=target, max_length=self.max_len_target, truncation=True)
model_inputs['labels'] = labels['input_ids']
return model_inputs
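# Editor's addition: a hypothetical sketch, not part of the original file, showing what
# Seq2SeqDataset.__getitem__ returns. The checkpoint name and the toy record are assumptions.
if __name__ == '__main__':
    from types import SimpleNamespace

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained('google/flan-t5-base')
    cfg = SimpleNamespace(max_seq_length=32, max_target_length=16, text_column='text', target_column='target')
    ds = Seq2SeqDataset(data=[{'text': 'summarize: autotrain makes training easy', 'target': 'autotrain is easy'}], tokenizer=tok, config=cfg)
    # each item is a dict of token ids for the encoder input plus target ids under 'labels'
    print(sorted(ds[0].keys()))  # expected: ['attention_mask', 'input_ids', 'labels']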
# File: autotrain-advanced-main/src/autotrain/trainers/seq2seq/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class Seq2SeqParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('google/flan-t5-base', title='Model name')
username: Optional[str] = Field(None, title='Hugging Face Username')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
project_name: str = Field('project-name', title='Output directory')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
text_column: str = Field('text', title='Text column')
target_column: str = Field('target', title='Target text column')
lr: float = Field(5e-05, title='Learning rate')
epochs: int = Field(3, title='Number of training epochs')
max_seq_length: int = Field(128, title='Max sequence length')
max_target_length: int = Field(128, title='Max target sequence length')
batch_size: int = Field(2, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
logging_steps: int = Field(-1, title='Logging steps')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
save_total_limit: int = Field(1, title='Save total limit')

peft: bool = Field(False, title='Use PEFT')
quantization: Optional[str] = Field('int8', title='int4, int8, or None')
lora_r: int = Field(16, title='LoRA-R')
lora_alpha: int = Field(32, title='LoRA-Alpha')
lora_dropout: float = Field(0.05, title='LoRA-Dropout')
target_modules: str = Field('all-linear', title='Target modules for PEFT')
log: str = Field('none', title='Logging using experiment tracking')
early_stopping_patience: int = Field(5, title='Early stopping patience')
early_stopping_threshold: float = Field(0.01, title='Early stopping threshold')
# File: autotrain-advanced-main/src/autotrain/trainers/seq2seq/utils.py
import os
import evaluate
import nltk
import numpy as np
ROUGE_METRIC = evaluate.load('rouge')
MODEL_CARD = '\n---\ntags:\n- autotrain\n- text2text-generation{base_model}\nwidget:\n- text: "I love AutoTrain"{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Seq2Seq\n\n## Validation Metrics\n{validation_metrics}\n'
def _seq2seq_metrics(pred, tokenizer):
(predictions, labels) = pred
decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds = ['\n'.join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds]
decoded_labels = ['\n'.join(nltk.sent_tokenize(label.strip())) for label in decoded_labels]
result = ROUGE_METRIC.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
result = {key: value * 100 for (key, value) in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions]
result['gen_len'] = np.mean(prediction_lens)
return {k: round(v, 4) for (k, v) in result.items()}
def create_model_card(config, trainer):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
eval_scores = [f"{k[len('eval_'):]}: {v}" for (k, v) in eval_scores.items()]
eval_scores = '\n\n'.join(eval_scores)
else:
eval_scores = 'No validation metrics available'
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, validation_metrics=eval_scores, base_model=base_model)
return model_card
# File: autotrain-advanced-main/src/autotrain/trainers/tabular/__main__.py
import argparse
import copy
import json
import os
from functools import partial
import joblib
import numpy as np
import optuna
import pandas as pd
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from sklearn import pipeline, preprocessing
from sklearn.compose import ColumnTransformer
from autotrain import logger
from autotrain.trainers.common import monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.tabular import utils
from autotrain.trainers.tabular.params import TabularParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
def optimize(trial, model_name, xtrain, xvalid, ytrain, yvalid, eval_metric, task, preprocessor):
if isinstance(trial, dict):
params = trial
else:
params = utils.get_params(trial, model_name, task)
labels = None
if task == 'multi_class_classification':
labels = np.unique(ytrain)
metrics = utils.TabularMetrics(sub_task=task, labels=labels)
if task in ('binary_classification', 'multi_class_classification', 'single_column_regression'):
ytrain = ytrain.ravel()
yvalid = yvalid.ravel()
if preprocessor is not None:
try:
xtrain = preprocessor.fit_transform(xtrain)
xvalid = preprocessor.transform(xvalid)
except ValueError:
logger.info('Preprocessing failed, using nan_to_num')
train_cols = xtrain.columns.tolist()
valid_cols = xvalid.columns.tolist()
xtrain = np.nan_to_num(xtrain)
xvalid = np.nan_to_num(xvalid)
xtrain = pd.DataFrame(xtrain, columns=train_cols)
xvalid = pd.DataFrame(xvalid, columns=valid_cols)
xtrain = preprocessor.fit_transform(xtrain)
xvalid = preprocessor.transform(xvalid)
if model_name == 'xgboost':
params['eval_metric'] = eval_metric
_model = utils.TabularModel(model_name, preprocessor=None, sub_task=task, params=params)
model = _model.pipeline
models = []
if task in ('multi_label_classification', 'multi_column_regression'):
ypred = []
# use independent copies so each target column keeps its own fitted estimator
models = [copy.deepcopy(model) for _ in range(ytrain.shape[1])]
for (idx, _m) in enumerate(models):
if model_name == 'xgboost':
_m.fit(xtrain, ytrain[:, idx], model__eval_set=[(xvalid, yvalid[:, idx])], model__verbose=False)
else:
_m.fit(xtrain, ytrain[:, idx])
if task == 'multi_column_regression':
ypred_temp = _m.predict(xvalid)
elif _model.use_predict_proba:
ypred_temp = _m.predict_proba(xvalid)[:, 1]
else:
ypred_temp = _m.predict(xvalid)
ypred.append(ypred_temp)
ypred = np.column_stack(ypred)
else:
models = [model]
if model_name == 'xgboost':
model.fit(xtrain, ytrain, model__eval_set=[(xvalid, yvalid)], model__verbose=False)
else:
models[0].fit(xtrain, ytrain)
if _model.use_predict_proba:
ypred = models[0].predict_proba(xvalid)
else:
ypred = models[0].predict(xvalid)
if task == 'multi_class_classification':
if ypred.reshape(xvalid.shape[0], -1).shape[1] != len(labels):
ypred_ohe = np.zeros((xvalid.shape[0], len(labels)))
ypred_ohe[np.arange(xvalid.shape[0]), ypred] = 1
ypred = ypred_ohe
if task == 'binary_classification':
if ypred.reshape(xvalid.shape[0], -1).shape[1] != 2:
ypred = np.column_stack([1 - ypred, ypred])
metric_dict = metrics.calculate(yvalid, ypred)
if eval_metric in metric_dict:
metric_dict['loss'] = metric_dict[eval_metric]
logger.info(f'Metrics: {metric_dict}')
if isinstance(trial, dict):
return (models, preprocessor, metric_dict)
return metric_dict['loss']
@monitor
def train(config):
if isinstance(config, dict):
config = TabularParams(**config)
logger.info('Starting training...')
logger.info(f'Training config: {config}')
train_data = None
valid_data = None
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
train_data = train_data.to_pandas()
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
valid_data = valid_data.to_pandas()
if valid_data is None:
raise Exception('valid_data is None. Please provide a valid_split for tabular training.')
if config.categorical_columns is None:
config.categorical_columns = utils.get_categorical_columns(train_data)
if config.numerical_columns is None:
config.numerical_columns = utils.get_numerical_columns(train_data)
_id_target_cols = [config.id_column] + config.target_columns if config.id_column is not None else config.target_columns
config.numerical_columns = [c for c in config.numerical_columns if c not in _id_target_cols]
config.categorical_columns = [c for c in config.categorical_columns if c not in _id_target_cols]
useful_columns = config.categorical_columns + config.numerical_columns
logger.info(f'Categorical columns: {config.categorical_columns}')
logger.info(f'Numerical columns: {config.numerical_columns}')
for col in config.categorical_columns:
train_data[col] = train_data[col].astype('category')
valid_data[col] = valid_data[col].astype('category')
logger.info(f'Useful columns: {useful_columns}')
target_encoders = {}
if config.task == 'classification':
for target_column in config.target_columns:
target_encoder = preprocessing.LabelEncoder()
target_encoder.fit(train_data[target_column])
target_encoders[target_column] = target_encoder
for (k, v) in target_encoders.items():
train_data.loc[:, k] = v.transform(train_data[k])
valid_data.loc[:, k] = v.transform(valid_data[k])
numeric_transformer = 'passthrough'
categorical_transformer = 'passthrough'
transformers = []
preprocessor = None
numeric_steps = []
imputer = utils.get_imputer(config.numerical_imputer)
scaler = utils.get_scaler(config.numeric_scaler)
if imputer is not None:
numeric_steps.append(('num_imputer', imputer))
if scaler is not None:
numeric_steps.append(('num_scaler', scaler))
if len(numeric_steps) > 0:
numeric_transformer = pipeline.Pipeline(numeric_steps)
transformers.append(('numeric', numeric_transformer, config.numerical_columns))
categorical_steps = []
imputer = utils.get_imputer(config.categorical_imputer)
if imputer is not None:
categorical_steps.append(('cat_imputer', imputer))
if len(config.categorical_columns) > 0:
if config.model in ('xgboost', 'lightgbm', 'randomforest', 'catboost', 'extratrees'):
categorical_steps.append(('cat_encoder', preprocessing.OrdinalEncoder(handle_unknown='use_encoded_value', categories='auto', unknown_value=np.nan)))
else:
categorical_steps.append(('cat_encoder', preprocessing.OneHotEncoder(handle_unknown='ignore')))
if len(categorical_steps) > 0:
categorical_transformer = pipeline.Pipeline(categorical_steps)
transformers.append(('categorical', categorical_transformer, config.categorical_columns))
if len(transformers) > 0:
preprocessor = ColumnTransformer(transformers=transformers, verbose=True, n_jobs=-1)
logger.info(f'Preprocessor: {preprocessor}')
xtrain = train_data[useful_columns].reset_index(drop=True)
xvalid = valid_data[useful_columns].reset_index(drop=True)
ytrain = train_data[config.target_columns].values
yvalid = valid_data[config.target_columns].values
if config.task == 'classification':
if len(target_encoders) == 1:
if len(target_encoders[config.target_columns[0]].classes_) == 2:
sub_task = 'binary_classification'
else:
sub_task = 'multi_class_classification'
else:
sub_task = 'multi_label_classification'
elif len(config.target_columns) > 1:
sub_task = 'multi_column_regression'
else:
sub_task = 'single_column_regression'
(eval_metric, direction) = utils.get_metric_direction(sub_task)
logger.info(f'Sub task: {sub_task}')
args = {'model_name': config.model, 'xtrain': xtrain, 'xvalid': xvalid, 'ytrain': ytrain, 'yvalid': yvalid, 'eval_metric': eval_metric, 'task': sub_task, 'preprocessor': preprocessor}
optimize_func = partial(optimize, **args)
study = optuna.create_study(direction=direction, study_name='AutoTrain')
study.optimize(optimize_func, n_trials=config.num_trials, timeout=config.time_limit)
best_params = study.best_params
logger.info(f'Best params: {best_params}')
(best_models, best_preprocessors, best_metrics) = optimize(best_params, **args)
models = [pipeline.Pipeline([('preprocessor', best_preprocessors), ('model', m)]) for m in best_models] if best_preprocessors is not None else best_models
joblib.dump(models[0] if len(models) == 1 else models, os.path.join(config.project_name, 'model.joblib'))
joblib.dump(target_encoders, os.path.join(config.project_name, 'target_encoders.joblib'))
model_card = utils.create_model_card(config, sub_task, best_params, best_metrics)
if model_card is not None:
with open(os.path.join(config.project_name, 'README.md'), 'w') as fp:
fp.write(f'{model_card}')
if os.path.exists(f'{config.project_name}/training_params.json'):
training_params = json.load(open(f'{config.project_name}/training_params.json'))
training_params.pop('token')
json.dump(training_params, open(f'{config.project_name}/training_params.json', 'w'))
if config.push_to_hub:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
pause_space(config)
if __name__ == '__main__':
args = parse_args()
training_config = json.load(open(args.training_config))
config = TabularParams(**training_config)
train(config)
# File: autotrain-advanced-main/src/autotrain/trainers/tabular/params.py
from typing import List, Optional, Union
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class TabularParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('xgboost', title='Model name')
username: Optional[str] = Field(None, title='Hugging Face Username')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
project_name: str = Field('project-name', title='Output directory')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
id_column: str = Field('id', title='ID column')
target_columns: Union[List[str], str] = Field(['target'], title='Target column(s)')
categorical_columns: Optional[List[str]] = Field(None, title='Categorical columns')
numerical_columns: Optional[List[str]] = Field(None, title='Numerical columns')
task: str = Field('classification', title='Task')
num_trials: int = Field(10, title='Number of trials')
time_limit: int = Field(600, title='Time limit')
categorical_imputer: Optional[str] = Field(None, title='Categorical imputer')
numerical_imputer: Optional[str] = Field(None, title='Numerical imputer')
numeric_scaler: Optional[str] = Field(None, title='Numeric scaler')
# File: autotrain-advanced-main/src/autotrain/trainers/tabular/utils.py
import copy
from collections import defaultdict
from dataclasses import dataclass
from functools import partial
from typing import List, Optional
import numpy as np
from sklearn import ensemble, impute, linear_model
from sklearn import metrics as skmetrics
from sklearn import naive_bayes, neighbors, pipeline, preprocessing, svm, tree
from xgboost import XGBClassifier, XGBRegressor
MARKDOWN = '\n---\ntags:\n- autotrain\n- tabular\n- {task}\n- tabular-{task}\ndatasets:\n- {dataset}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Tabular {task}\n\n## Validation Metrics\n\n{metrics}\n\n## Best Params\n\n{params}\n\n## Usage\n\n```python\nimport json\nimport joblib\nimport pandas as pd\n\nmodel = joblib.load(\'model.joblib\')\nconfig = json.load(open(\'config.json\'))\n\nfeatures = config[\'features\']\n\n# data = pd.read_csv("data.csv")\ndata = data[features]\n\npredictions = model.predict(data) # or model.predict_proba(data)\n\n# predictions can be converted to original labels using target_encoders.joblib\n\n```\n'
_MODELS: dict = defaultdict(dict)
_MODELS['xgboost']['classification'] = XGBClassifier
_MODELS['xgboost']['regression'] = XGBRegressor
_MODELS['logistic_regression']['classification'] = linear_model.LogisticRegression
_MODELS['logistic_regression']['regression'] = linear_model.LogisticRegression
_MODELS['random_forest']['classification'] = ensemble.RandomForestClassifier
_MODELS['random_forest']['regression'] = ensemble.RandomForestRegressor
_MODELS['extra_trees']['classification'] = ensemble.ExtraTreesClassifier
_MODELS['extra_trees']['regression'] = ensemble.ExtraTreesRegressor
_MODELS['gradient_boosting']['classification'] = ensemble.GradientBoostingClassifier
_MODELS['gradient_boosting']['regression'] = ensemble.GradientBoostingRegressor
_MODELS['adaboost']['classification'] = ensemble.AdaBoostClassifier
_MODELS['adaboost']['regression'] = ensemble.AdaBoostRegressor
_MODELS['ridge']['classification'] = linear_model.RidgeClassifier
_MODELS['ridge']['regression'] = linear_model.Ridge
_MODELS['svm']['classification'] = svm.LinearSVC
_MODELS['svm']['regression'] = svm.LinearSVR
_MODELS['decision_tree']['classification'] = tree.DecisionTreeClassifier
_MODELS['decision_tree']['regression'] = tree.DecisionTreeRegressor
_MODELS['lasso']['regression'] = linear_model.Lasso
_MODELS['linear_regression']['regression'] = linear_model.LinearRegression
_MODELS['naive_bayes']['classification'] = naive_bayes.GaussianNB
_MODELS['knn']['classification'] = neighbors.KNeighborsClassifier
_MODELS['knn']['regression'] = neighbors.KNeighborsRegressor
CLASSIFICATION_TASKS = ('binary_classification', 'multi_class_classification', 'multi_label_classification')
REGRESSION_TASKS = ('single_column_regression', 'multi_column_regression')
@dataclass
class TabularMetrics:
sub_task: str
labels: Optional[List] = None
def __post_init__(self):
if self.sub_task == 'binary_classification':
self.valid_metrics = {'auc': skmetrics.roc_auc_score, 'logloss': skmetrics.log_loss, 'f1': skmetrics.f1_score, 'accuracy': skmetrics.accuracy_score, 'precision': skmetrics.precision_score, 'recall': skmetrics.recall_score}
elif self.sub_task == 'multi_class_classification':
self.valid_metrics = {'logloss': partial(skmetrics.log_loss, labels=self.labels), 'accuracy': skmetrics.accuracy_score, 'mlogloss': partial(skmetrics.log_loss, labels=self.labels), 'f1_macro': partial(skmetrics.f1_score, average='macro', labels=self.labels), 'f1_micro': partial(skmetrics.f1_score, average='micro', labels=self.labels), 'f1_weighted': partial(skmetrics.f1_score, average='weighted', labels=self.labels), 'precision_macro': partial(skmetrics.precision_score, average='macro', labels=self.labels), 'precision_micro': partial(skmetrics.precision_score, average='micro', labels=self.labels), 'precision_weighted': partial(skmetrics.precision_score, average='weighted', labels=self.labels), 'recall_macro': partial(skmetrics.recall_score, average='macro', labels=self.labels), 'recall_micro': partial(skmetrics.recall_score, average='micro', labels=self.labels), 'recall_weighted': partial(skmetrics.recall_score, average='weighted', labels=self.labels)}
elif self.sub_task in ('single_column_regression', 'multi_column_regression'):
self.valid_metrics = {'r2': skmetrics.r2_score, 'mse': skmetrics.mean_squared_error, 'mae': skmetrics.mean_absolute_error, 'rmse': partial(skmetrics.mean_squared_error, squared=False), 'rmsle': partial(skmetrics.mean_squared_log_error, squared=False)}
elif self.sub_task == 'multi_label_classification':
self.valid_metrics = {'logloss': skmetrics.log_loss}
else:
raise ValueError('Invalid problem type')
def calculate(self, y_true, y_pred):
metrics = {}
for (metric_name, metric_func) in self.valid_metrics.items():
if self.sub_task == 'binary_classification':
if metric_name == 'auc':
metrics[metric_name] = metric_func(y_true, y_pred[:, 1])
elif metric_name == 'logloss':
metrics[metric_name] = metric_func(y_true, y_pred)
else:
metrics[metric_name] = metric_func(y_true, y_pred[:, 1] >= 0.5)
elif self.sub_task == 'multi_class_classification':
if metric_name in ('accuracy', 'f1_macro', 'f1_micro', 'f1_weighted', 'precision_macro', 'precision_micro', 'precision_weighted', 'recall_macro', 'recall_micro', 'recall_weighted'):
metrics[metric_name] = metric_func(y_true, np.argmax(y_pred, axis=1))
else:
metrics[metric_name] = metric_func(y_true, y_pred)
elif metric_name == 'rmsle':
temp_pred = copy.deepcopy(y_pred)
temp_pred = np.clip(temp_pred, 0, None)
metrics[metric_name] = metric_func(y_true, temp_pred)
else:
metrics[metric_name] = metric_func(y_true, y_pred)
return metrics
class TabularModel:
def __init__(self, model, preprocessor, sub_task, params):
self.model = model
self.preprocessor = preprocessor
self.sub_task = sub_task
self.params = params
self.use_predict_proba = True
_model = self._get_model()
if self.preprocessor is not None:
self.pipeline = pipeline.Pipeline([('preprocessor', self.preprocessor), ('model', _model)])
else:
self.pipeline = pipeline.Pipeline([('model', _model)])
def _get_model(self):
if self.model in _MODELS:
if self.sub_task in CLASSIFICATION_TASKS:
if self.model in ('svm', 'ridge'):
self.use_predict_proba = False
return _MODELS[self.model]['classification'](**self.params)
elif self.sub_task in REGRESSION_TASKS:
self.use_predict_proba = False
return _MODELS[self.model]['regression'](**self.params)
else:
raise ValueError('Invalid task')
else:
raise ValueError('Invalid model')
def get_params(trial, model, task):
if model == 'xgboost':
params = {'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.25, log=True), 'reg_lambda': trial.suggest_float('reg_lambda', 1e-08, 100.0, log=True), 'reg_alpha': trial.suggest_float('reg_alpha', 1e-08, 100.0, log=True), 'subsample': trial.suggest_float('subsample', 0.1, 1.0), 'colsample_bytree': trial.suggest_float('colsample_bytree', 0.1, 1.0), 'max_depth': trial.suggest_int('max_depth', 1, 9), 'early_stopping_rounds': trial.suggest_int('early_stopping_rounds', 100, 500), 'n_estimators': trial.suggest_categorical('n_estimators', [7000, 15000, 20000]), 'tree_method': 'hist', 'random_state': 42}
return params
if model == 'logistic_regression':
if task in CLASSIFICATION_TASKS:
params = {'C': trial.suggest_float('C', 1e-08, 1000.0, log=True), 'fit_intercept': trial.suggest_categorical('fit_intercept', [True, False]), 'solver': trial.suggest_categorical('solver', ['liblinear', 'saga']), 'penalty': trial.suggest_categorical('penalty', ['l1', 'l2']), 'n_jobs': -1}
return params
raise ValueError('Task not supported')
if model == 'random_forest':
params = {'n_estimators': trial.suggest_int('n_estimators', 10, 10000), 'max_depth': trial.suggest_int('max_depth', 2, 15), 'max_features': trial.suggest_categorical('max_features', ['auto', 'sqrt', 'log2', None]), 'min_samples_split': trial.suggest_int('min_samples_split', 2, 20), 'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, 20), 'bootstrap': trial.suggest_categorical('bootstrap', [True, False]), 'n_jobs': -1}
if task in CLASSIFICATION_TASKS:
params['criterion'] = trial.suggest_categorical('criterion', ['gini', 'entropy'])
return params
if task in REGRESSION_TASKS:
params['criterion'] = trial.suggest_categorical('criterion', ['squared_error', 'absolute_error', 'poisson'])
return params
raise ValueError('Task not supported')
if model == 'extra_trees':
params = {'n_estimators': trial.suggest_int('n_estimators', 10, 10000), 'max_depth': trial.suggest_int('max_depth', 2, 15), 'max_features': trial.suggest_categorical('max_features', ['auto', 'sqrt', 'log2', None]), 'min_samples_split': trial.suggest_int('min_samples_split', 2, 20), 'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, 20), 'bootstrap': trial.suggest_categorical('bootstrap', [True, False]), 'n_jobs': -1}
if task in CLASSIFICATION_TASKS:
params['criterion'] = trial.suggest_categorical('criterion', ['gini', 'entropy'])
return params
if task in REGRESSION_TASKS:
params['criterion'] = trial.suggest_categorical('criterion', ['squared_error', 'absolute_error'])
return params
raise ValueError('Task not supported')
if model == 'decision_tree':
params = {'max_depth': trial.suggest_int('max_depth', 1, 15), 'min_samples_split': trial.suggest_int('min_samples_split', 2, 20), 'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, 20), 'max_features': trial.suggest_categorical('max_features', ['auto', 'sqrt', 'log2', None]), 'splitter': trial.suggest_categorical('splitter', ['best', 'random'])}
if task in CLASSIFICATION_TASKS:
params['criterion'] = trial.suggest_categorical('criterion', ['gini', 'entropy'])
return params
if task in REGRESSION_TASKS:
params['criterion'] = trial.suggest_categorical('criterion', ['squared_error', 'absolute_error', 'friedman_mse', 'poisson'])
return params
raise ValueError('Task not supported')
if model == 'linear_regression':
if task in REGRESSION_TASKS:
params = {'fit_intercept': trial.suggest_categorical('fit_intercept', [True, False])}
return params
raise ValueError('Task not supported')
if model == 'svm':
if task in CLASSIFICATION_TASKS:
params = {'C': trial.suggest_float('C', 1e-08, 1000.0, log=True), 'fit_intercept': trial.suggest_categorical('fit_intercept', [True, False]), 'penalty': 'l2', 'max_iter': trial.suggest_int('max_iter', 1000, 10000)}
return params
if task in REGRESSION_TASKS:
params = {'C': trial.suggest_float('C', 1e-08, 1000.0, log=True), 'fit_intercept': trial.suggest_categorical('fit_intercept', [True, False]), 'loss': trial.suggest_categorical('loss', ['epsilon_insensitive', 'squared_epsilon_insensitive']), 'epsilon': trial.suggest_float('epsilon', 1e-08, 0.1, log=True), 'max_iter': trial.suggest_int('max_iter', 1000, 10000)}
return params
raise ValueError('Task not supported')
if model == 'ridge':
params = {'alpha': trial.suggest_float('alpha', 1e-08, 1000.0, log=True), 'fit_intercept': trial.suggest_categorical('fit_intercept', [True, False]), 'max_iter': trial.suggest_int('max_iter', 1000, 10000)}
if task in CLASSIFICATION_TASKS:
return params
if task in REGRESSION_TASKS:
return params
raise ValueError('Task not supported')
if model == 'lasso':
if task in REGRESSION_TASKS:
params = {'alpha': trial.suggest_float('alpha', 1e-08, 1000.0, log=True), 'fit_intercept': trial.suggest_categorical('fit_intercept', [True, False]), 'max_iter': trial.suggest_int('max_iter', 1000, 10000)}
return params
raise ValueError('Task not supported')
if model == 'knn':
params = {'n_neighbors': trial.suggest_int('n_neighbors', 1, 25), 'weights': trial.suggest_categorical('weights', ['uniform', 'distance']), 'algorithm': trial.suggest_categorical('algorithm', ['ball_tree', 'kd_tree', 'brute']), 'leaf_size': trial.suggest_int('leaf_size', 1, 100), 'p': trial.suggest_categorical('p', [1, 2]), 'metric': trial.suggest_categorical('metric', ['minkowski', 'euclidean', 'manhattan'])}
if task in CLASSIFICATION_TASKS or task in REGRESSION_TASKS:
return params
raise ValueError('Task not supported')
raise ValueError('Invalid model')
def get_imputer(imputer_name):
if imputer_name is None:
return None
if imputer_name == 'median':
return impute.SimpleImputer(strategy='median')
if imputer_name == 'mean':
return impute.SimpleImputer(strategy='mean')
if imputer_name == 'most_frequent':
return impute.SimpleImputer(strategy='most_frequent')
raise ValueError('Invalid imputer')
def get_scaler(scaler_name):
if scaler_name is None:
return None
if scaler_name == 'standard':
return preprocessing.StandardScaler()
if scaler_name == 'minmax':
return preprocessing.MinMaxScaler()
if scaler_name == 'robust':
return preprocessing.RobustScaler()
if scaler_name == 'normal':
return preprocessing.Normalizer()
raise ValueError('Invalid scaler')
def get_metric_direction(sub_task):
if sub_task == 'binary_classification':
return ('logloss', 'minimize')
if sub_task == 'multi_class_classification':
return ('mlogloss', 'minimize')
if sub_task == 'single_column_regression':
return ('rmse', 'minimize')
if sub_task == 'multi_label_classification':
return ('logloss', 'minimize')
if sub_task == 'multi_column_regression':
return ('rmse', 'minimize')
raise ValueError('Invalid sub_task')
def get_categorical_columns(df):
return list(df.select_dtypes(include=['category', 'object']).columns)
def get_numerical_columns(df):
return list(df.select_dtypes(include=['number']).columns)
def create_model_card(config, sub_task, best_params, best_metrics):
best_metrics = '\n'.join([f'- {k}: {v}' for (k, v) in best_metrics.items()])
best_params = '\n'.join([f'- {k}: {v}' for (k, v) in best_params.items()])
return MARKDOWN.format(task=config.task, dataset=config.data_path, metrics=best_metrics, params=best_params)
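# Editor's addition: a hypothetical sketch, not part of the original file, of TabularMetrics
# for binary classification; it expects per-class probabilities, and the arrays are illustrative.
if __name__ == '__main__':
    y_true = np.array([0, 1, 1, 0])
    y_proba = np.array([[0.9, 0.1], [0.2, 0.8], [0.4, 0.6], [0.7, 0.3]])
    m = TabularMetrics(sub_task='binary_classification')
    print(m.calculate(y_true, y_proba))  # auc, logloss, f1, accuracy, precision, recall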
# File: autotrain-advanced-main/src/autotrain/trainers/text_classification/__main__.py
import argparse
import json
from accelerate.state import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EarlyStoppingCallback, Trainer, TrainingArguments
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.text_classification import utils
from autotrain.trainers.text_classification.dataset import TextClassificationDataset
from autotrain.trainers.text_classification.params import TextClassificationParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = TextClassificationParams(**config)
train_data = None
valid_data = None
if config.train_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
classes = train_data.features[config.target_column].names
label2id = {c: i for (i, c) in enumerate(classes)}
num_classes = len(classes)
if num_classes < 2:
raise ValueError('Invalid number of classes. Must be greater than 1.')
if config.valid_split is not None:
num_classes_valid = len(valid_data.unique(config.target_column))
if num_classes_valid != num_classes:
raise ValueError(f'Number of classes in train and valid are not the same. Training has {num_classes} and valid has {num_classes_valid}')
model_config = AutoConfig.from_pretrained(config.model, num_labels=num_classes)
model_config._num_labels = len(label2id)
model_config.label2id = label2id
model_config.id2label = {v: k for (k, v) in label2id.items()}
try:
model = AutoModelForSequenceClassification.from_pretrained(config.model, config=model_config, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
except OSError:
model = AutoModelForSequenceClassification.from_pretrained(config.model, config=model_config, from_tf=True, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
tokenizer = AutoTokenizer.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
train_data = TextClassificationDataset(data=train_data, tokenizer=tokenizer, config=config)
if config.valid_split is not None:
valid_data = TextClassificationDataset(data=valid_data, tokenizer=tokenizer, config=config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=2 * config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False)
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=config.early_stopping_patience, early_stopping_threshold=config.early_stopping_threshold)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
callbacks_to_use.extend([UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()])
args = TrainingArguments(**training_args)
trainer_args = dict(args=args, model=model, callbacks=callbacks_to_use, compute_metrics=utils._binary_classification_metrics if num_classes == 2 else utils._multi_class_classification_metrics)
trainer = Trainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data)
trainer.remove_callback(PrinterCallback)
trainer.train()
logger.info('Finished training, saving model...')
trainer.save_model(config.project_name)
tokenizer.save_pretrained(config.project_name)
model_card = utils.create_model_card(config, trainer, num_classes)
with open(f'{config.project_name}/README.md', 'w') as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
if __name__ == '__main__':
args = parse_args()
training_config = json.load(open(args.training_config))
config = TextClassificationParams(**training_config)
train(config)
# File: autotrain-advanced-main/src/autotrain/trainers/text_classification/dataset.py
import torch
class TextClassificationDataset:
def __init__(self, data, tokenizer, config):
self.data = data
self.tokenizer = tokenizer
self.config = config
self.text_column = self.config.text_column
self.target_column = self.config.target_column
def __len__(self):
return len(self.data)
def __getitem__(self, item):
text = str(self.data[item][self.text_column])
target = self.data[item][self.target_column]
target = int(target)
inputs = self.tokenizer(text, max_length=self.config.max_seq_length, padding='max_length', truncation=True)
ids = inputs['input_ids']
mask = inputs['attention_mask']
if 'token_type_ids' in inputs:
token_type_ids = inputs['token_type_ids']
else:
token_type_ids = None
if token_type_ids is not None:
return {'input_ids': torch.tensor(ids, dtype=torch.long), 'attention_mask': torch.tensor(mask, dtype=torch.long), 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long), 'labels': torch.tensor(target, dtype=torch.long)}
return {'input_ids': torch.tensor(ids, dtype=torch.long), 'attention_mask': torch.tensor(mask, dtype=torch.long), 'labels': torch.tensor(target, dtype=torch.long)}
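# Editor's addition: a hypothetical sketch, not part of the original file, showing one record
# run through TextClassificationDataset; the checkpoint and the toy record are assumptions.
if __name__ == '__main__':
    from types import SimpleNamespace

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained('bert-base-uncased')
    cfg = SimpleNamespace(text_column='text', target_column='target', max_seq_length=16)
    ds = TextClassificationDataset(data=[{'text': 'I love AutoTrain', 'target': 1}], tokenizer=tok, config=cfg)
    sample = ds[0]
    # inputs are padded to max_seq_length and labels are integer class ids
    print(sample['input_ids'].shape, sample['labels'])  # torch.Size([16]) tensor(1)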
# File: autotrain-advanced-main/src/autotrain/trainers/text_classification/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class TextClassificationParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('bert-base-uncased', title='Model name')
lr: float = Field(5e-05, title='Learning rate')
epochs: int = Field(3, title='Number of training epochs')
max_seq_length: int = Field(128, title='Max sequence length')
batch_size: int = Field(8, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
text_column: str = Field('text', title='Text column')
target_column: str = Field('target', title='Target column')
logging_steps: int = Field(-1, title='Logging steps')
project_name: str = Field('project-name', title='Output directory')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
save_total_limit: int = Field(1, title='Save total limit')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
username: Optional[str] = Field(None, title='Hugging Face Username')
log: str = Field('none', title='Logging using experiment tracking')
early_stopping_patience: int = Field(5, title='Early stopping patience')
early_stopping_threshold: float = Field(0.01, title='Early stopping threshold')
# File: autotrain-advanced-main/src/autotrain/trainers/text_classification/utils.py
import os
import numpy as np
import requests
from sklearn import metrics
BINARY_CLASSIFICATION_EVAL_METRICS = ('eval_loss', 'eval_accuracy', 'eval_f1', 'eval_auc', 'eval_precision', 'eval_recall')
MULTI_CLASS_CLASSIFICATION_EVAL_METRICS = ('eval_loss', 'eval_accuracy', 'eval_f1_macro', 'eval_f1_micro', 'eval_f1_weighted', 'eval_precision_macro', 'eval_precision_micro', 'eval_precision_weighted', 'eval_recall_macro', 'eval_recall_micro', 'eval_recall_weighted')
MODEL_CARD = '\n---\ntags:\n- autotrain\n- text-classification{base_model}\nwidget:\n- text: "I love AutoTrain"{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Text Classification\n\n## Validation Metrics\n{validation_metrics}\n'
def _binary_classification_metrics(pred):
(raw_predictions, labels) = pred
predictions = np.argmax(raw_predictions, axis=1)
result = {'f1': metrics.f1_score(labels, predictions), 'precision': metrics.precision_score(labels, predictions), 'recall': metrics.recall_score(labels, predictions), 'auc': metrics.roc_auc_score(labels, raw_predictions[:, 1]), 'accuracy': metrics.accuracy_score(labels, predictions)}
return result
def _multi_class_classification_metrics(pred):
(raw_predictions, labels) = pred
predictions = np.argmax(raw_predictions, axis=1)
results = {'f1_macro': metrics.f1_score(labels, predictions, average='macro'), 'f1_micro': metrics.f1_score(labels, predictions, average='micro'), 'f1_weighted': metrics.f1_score(labels, predictions, average='weighted'), 'precision_macro': metrics.precision_score(labels, predictions, average='macro'), 'precision_micro': metrics.precision_score(labels, predictions, average='micro'), 'precision_weighted': metrics.precision_score(labels, predictions, average='weighted'), 'recall_macro': metrics.recall_score(labels, predictions, average='macro'), 'recall_micro': metrics.recall_score(labels, predictions, average='micro'), 'recall_weighted': metrics.recall_score(labels, predictions, average='weighted'), 'accuracy': metrics.accuracy_score(labels, predictions)}
return results
def create_model_card(config, trainer, num_classes):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
valid_metrics = BINARY_CLASSIFICATION_EVAL_METRICS if num_classes == 2 else MULTI_CLASS_CLASSIFICATION_EVAL_METRICS
eval_scores = [f"{k[len('eval_'):]}: {v}" for (k, v) in eval_scores.items() if k in valid_metrics]
eval_scores = '\n\n'.join(eval_scores)
else:
eval_scores = 'No validation metrics available'
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, validation_metrics=eval_scores, base_model=base_model)
return model_card
def pause_endpoint(params):
endpoint_id = os.environ['ENDPOINT_ID']
username = endpoint_id.split('/')[0]
project_name = endpoint_id.split('/')[1]
api_url = f'https://api.endpoints.huggingface.cloud/v2/endpoint/{username}/{project_name}/pause'
headers = {'Authorization': f'Bearer {params.token}'}
r = requests.post(api_url, headers=headers)
return r.json()
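# Editor's addition: a hypothetical sketch, not part of the original file. The metric helpers
# receive (raw logits, labels) as produced by the Trainer; the values below are illustrative.
if __name__ == '__main__':
    logits = np.array([[2.0, -1.0], [0.1, 1.5], [1.2, 0.3], [-0.5, 0.5]])
    labels = np.array([0, 1, 0, 1])
    print(_binary_classification_metrics((logits, labels)))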
# File: autotrain-advanced-main/src/autotrain/trainers/text_regression/__main__.py
import argparse
import json
from accelerate.state import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EarlyStoppingCallback, Trainer, TrainingArguments
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.text_regression import utils
from autotrain.trainers.text_regression.dataset import TextRegressionDataset
from autotrain.trainers.text_regression.params import TextRegressionParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = TextRegressionParams(**config)
train_data = None
valid_data = None
if config.train_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
model_config = AutoConfig.from_pretrained(config.model, num_labels=1, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token)
model_config._num_labels = 1
label2id = {'target': 0}
model_config.label2id = label2id
model_config.id2label = {v: k for (k, v) in label2id.items()}
try:
model = AutoModelForSequenceClassification.from_pretrained(config.model, config=model_config, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
except OSError:
model = AutoModelForSequenceClassification.from_pretrained(config.model, config=model_config, from_tf=True, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
tokenizer = AutoTokenizer.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
train_data = TextRegressionDataset(data=train_data, tokenizer=tokenizer, config=config)
if config.valid_split is not None:
valid_data = TextRegressionDataset(data=valid_data, tokenizer=tokenizer, config=config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=2 * config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False)
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=config.early_stopping_patience, early_stopping_threshold=config.early_stopping_threshold)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
callbacks_to_use.extend([UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()])
args = TrainingArguments(**training_args)
trainer_args = dict(args=args, model=model, callbacks=callbacks_to_use, compute_metrics=utils.single_column_regression_metrics)
trainer = Trainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data)
trainer.remove_callback(PrinterCallback)
trainer.train()
logger.info('Finished training, saving model...')
trainer.save_model(config.project_name)
tokenizer.save_pretrained(config.project_name)
model_card = utils.create_model_card(config, trainer)
with open(f'{config.project_name}/README.md', 'w', encoding='utf-8') as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
if __name__ == '__main__':
args = parse_args()
with open(args.training_config, encoding='utf-8') as f:
    training_config = json.load(f)
config = TextRegressionParams(**training_config)
train(config)
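# Usage sketch: this trainer is normally launched by AutoTrain itself, but the module can also be
# run directly with a JSON file of TextRegressionParams fields, e.g.
#   python -m autotrain.trainers.text_regression --training_config config.json
# where config.json holds illustrative values such as:
#   {"data_path": "my-org/my-dataset", "model": "bert-base-uncased", "text_column": "text",
#    "target_column": "target", "project_name": "my-regression-model"}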
# File: autotrain-advanced-main/src/autotrain/trainers/text_regression/dataset.py
import torch
class TextRegressionDataset:
def __init__(self, data, tokenizer, config):
self.data = data
self.tokenizer = tokenizer
self.config = config
self.text_column = self.config.text_column
self.target_column = self.config.target_column
self.max_len = self.config.max_seq_length
def __len__(self):
return len(self.data)
def __getitem__(self, item):
text = str(self.data[item][self.text_column])
target = float(self.data[item][self.target_column])
inputs = self.tokenizer(text, max_length=self.max_len, padding='max_length', truncation=True)
ids = inputs['input_ids']
mask = inputs['attention_mask']
if 'token_type_ids' in inputs:
token_type_ids = inputs['token_type_ids']
else:
token_type_ids = None
if token_type_ids is not None:
return {'input_ids': torch.tensor(ids, dtype=torch.long), 'attention_mask': torch.tensor(mask, dtype=torch.long), 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long), 'labels': torch.tensor(target, dtype=torch.float)}
return {'input_ids': torch.tensor(ids, dtype=torch.long), 'attention_mask': torch.tensor(mask, dtype=torch.long), 'labels': torch.tensor(target, dtype=torch.float)}
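# A minimal usage sketch showing what a single item looks like; the tokenizer checkpoint, toy rows,
# and SimpleNamespace config below are illustrative only.
if __name__ == '__main__':
    from types import SimpleNamespace

    from transformers import AutoTokenizer

    toy_config = SimpleNamespace(text_column='text', target_column='target', max_seq_length=16)
    toy_rows = [{'text': 'I love AutoTrain', 'target': 4.5}]
    toy_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    example = TextRegressionDataset(data=toy_rows, tokenizer=toy_tokenizer, config=toy_config)[0]
    # input_ids/attention_mask are padded to max_seq_length; labels is a float scalar tensor
    print({k: v.shape for (k, v) in example.items()})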
# File: autotrain-advanced-main/src/autotrain/trainers/text_regression/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class TextRegressionParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('bert-base-uncased', title='Model name')
lr: float = Field(5e-05, title='Learning rate')
epochs: int = Field(3, title='Number of training epochs')
max_seq_length: int = Field(128, title='Max sequence length')
batch_size: int = Field(8, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
text_column: str = Field('text', title='Text column')
target_column: str = Field('target', title='Target column')
logging_steps: int = Field(-1, title='Logging steps')
project_name: str = Field('project-name', title='Output directory')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
save_total_limit: int = Field(1, title='Save total limit')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
username: Optional[str] = Field(None, title='Hugging Face Username')
log: str = Field('none', title='Logging using experiment tracking')
early_stopping_patience: int = Field(5, title='Early stopping patience')
early_stopping_threshold: float = Field(0.01, title='Early stopping threshold')
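# Construction sketch (illustrative values; 'my-org/my-dataset' is a placeholder): fields left
# unset fall back to the defaults declared above.
#   params = TextRegressionParams(data_path='my-org/my-dataset', target_column='score')
#   params.model  # -> 'bert-base-uncased'
#   params.lr     # -> 5e-05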
# File: autotrain-advanced-main/src/autotrain/trainers/text_regression/utils.py
import os
import numpy as np
from sklearn import metrics
SINGLE_COLUMN_REGRESSION_EVAL_METRICS = ('eval_loss', 'eval_mse', 'eval_mae', 'eval_r2', 'eval_rmse', 'eval_explained_variance')
MODEL_CARD = '\n---\ntags:\n- autotrain\n- text-regression{base_model}\nwidget:\n- text: "I love AutoTrain"{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Text Regression\n\n## Validation Metrics\n{validation_metrics}\n'
def single_column_regression_metrics(pred):
(raw_predictions, labels) = pred
def safe_compute(metric_func, default=-999):
try:
return metric_func(labels, raw_predictions)
except Exception:
return default
pred_dict = {'mse': safe_compute(lambda labels, predictions: metrics.mean_squared_error(labels, predictions)), 'mae': safe_compute(lambda labels, predictions: metrics.mean_absolute_error(labels, predictions)), 'r2': safe_compute(lambda labels, predictions: metrics.r2_score(labels, predictions)), 'rmse': safe_compute(lambda labels, predictions: np.sqrt(metrics.mean_squared_error(labels, predictions))), 'explained_variance': safe_compute(lambda labels, predictions: metrics.explained_variance_score(labels, predictions))}
for (key, value) in pred_dict.items():
pred_dict[key] = float(value)
return pred_dict
def create_model_card(config, trainer):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
eval_scores = [f"{k[len('eval_'):]}: {v}" for (k, v) in eval_scores.items() if k in SINGLE_COLUMN_REGRESSION_EVAL_METRICS]
eval_scores = '\n\n'.join(eval_scores)
else:
eval_scores = 'No validation metrics available'
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, validation_metrics=eval_scores, base_model=base_model)
return model_card
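# Quick sanity-check sketch: the Trainer calls single_column_regression_metrics with a
# (predictions, labels) tuple; plain numpy arrays behave the same way here.
if __name__ == '__main__':
    toy_predictions = np.array([1.0, 2.0, 3.0])
    toy_labels = np.array([1.1, 1.9, 3.2])
    # -> dict with mse, mae, r2, rmse and explained_variance as floats
    print(single_column_regression_metrics((toy_predictions, toy_labels)))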
# File: autotrain-advanced-main/src/autotrain/trainers/token_classification/__main__.py
import argparse
import json
from functools import partial
from accelerate.state import PartialState
from datasets import load_dataset, load_from_disk
from huggingface_hub import HfApi
from transformers import AutoConfig, AutoModelForTokenClassification, AutoTokenizer, EarlyStoppingCallback, Trainer, TrainingArguments
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, monitor, pause_space, remove_autotrain_data, save_training_params
from autotrain.trainers.token_classification import utils
from autotrain.trainers.token_classification.dataset import TokenClassificationDataset
from autotrain.trainers.token_classification.params import TokenClassificationParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = TokenClassificationParams(**config)
train_data = None
valid_data = None
if config.train_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
logger.info('loading dataset from disk')
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
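# label_list comes from the dataset's ClassLabel feature, e.g. ['O', 'B-PER', 'I-PER', ...] for a
# CoNLL-style NER dataset; it defines the label space of the token classification head below.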
label_list = train_data.features[config.tags_column].feature.names
num_classes = len(label_list)
model_config = AutoConfig.from_pretrained(config.model, num_labels=num_classes, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token)
model_config._num_labels = num_classes
model_config.label2id = {l: i for (i, l) in enumerate(label_list)}
model_config.id2label = dict(enumerate(label_list))
try:
model = AutoModelForTokenClassification.from_pretrained(config.model, config=model_config, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
except OSError:
model = AutoModelForTokenClassification.from_pretrained(config.model, config=model_config, from_tf=True, trust_remote_code=ALLOW_REMOTE_CODE, token=config.token, ignore_mismatched_sizes=True)
if model_config.model_type in {'bloom', 'gpt2', 'roberta'}:
tokenizer = AutoTokenizer.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, add_prefix_space=True)
else:
tokenizer = AutoTokenizer.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
train_data = TokenClassificationDataset(data=train_data, tokenizer=tokenizer, config=config)
if config.valid_split is not None:
valid_data = TokenClassificationDataset(data=valid_data, tokenizer=tokenizer, config=config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=2 * config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False)
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=config.early_stopping_patience, early_stopping_threshold=config.early_stopping_threshold)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
callbacks_to_use.extend([UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()])
args = TrainingArguments(**training_args)
trainer_args = dict(args=args, model=model, callbacks=callbacks_to_use, compute_metrics=partial(utils.token_classification_metrics, label_list=label_list))
trainer = Trainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data)
trainer.remove_callback(PrinterCallback)
trainer.train()
logger.info('Finished training, saving model...')
trainer.save_model(config.project_name)
tokenizer.save_pretrained(config.project_name)
model_card = utils.create_model_card(config, trainer)
with open(f'{config.project_name}/README.md', 'w', encoding='utf-8') as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
save_training_params(config)
logger.info('Pushing model to hub...')
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
if __name__ == '__main__':
args = parse_args()
with open(args.training_config, encoding='utf-8') as f:
    training_config = json.load(f)
config = TokenClassificationParams(**training_config)
train(config)
# File: autotrain-advanced-main/src/autotrain/trainers/token_classification/dataset.py
class TokenClassificationDataset:
def __init__(self, data, tokenizer, config):
self.data = data
self.tokenizer = tokenizer
self.config = config
def __len__(self):
return len(self.data)
def __getitem__(self, item):
text = self.data[item][self.config.tokens_column]
tags = self.data[item][self.config.tags_column]
label_list = self.data.features[self.config.tags_column].feature.names
label_to_id = {i: i for i in range(len(label_list))}
tokenized_text = self.tokenizer(text, max_length=self.config.max_seq_length, padding='max_length', truncation=True, is_split_into_words=True)
word_ids = tokenized_text.word_ids(batch_index=0)
previous_word_idx = None
label_ids = []
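# Align word-level tags with sub-word tokens: special tokens (word_idx is None) get -100 so the
# loss ignores them; the first sub-token and any continuation sub-tokens of a word both receive
# that word's label in this implementation.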
for word_idx in word_ids:
if word_idx is None:
label_ids.append(-100)
elif word_idx != previous_word_idx:
label_ids.append(label_to_id[tags[word_idx]])
else:
label_ids.append(label_to_id[tags[word_idx]])
previous_word_idx = word_idx
tokenized_text['labels'] = label_ids
return tokenized_text
# File: autotrain-advanced-main/src/autotrain/trainers/token_classification/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class TokenClassificationParams(AutoTrainParams):
data_path: str = Field(None, title='Data path')
model: str = Field('bert-base-uncased', title='Model name')
lr: float = Field(5e-05, title='Learning rate')
epochs: int = Field(3, title='Number of training epochs')
max_seq_length: int = Field(128, title='Max sequence length')
batch_size: int = Field(8, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(1, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
tokens_column: str = Field('tokens', title='Tokens column')
tags_column: str = Field('tags', title='Tags column')
logging_steps: int = Field(-1, title='Logging steps')
project_name: str = Field('project-name', title='Output directory')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
save_total_limit: int = Field(1, title='Save total limit')
token: Optional[str] = Field(None, title='Hub Token')
push_to_hub: bool = Field(False, title='Push to hub')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
username: Optional[str] = Field(None, title='Hugging Face Username')
log: str = Field('none', title='Logging using experiment tracking')
early_stopping_patience: int = Field(5, title='Early stopping patience')
early_stopping_threshold: float = Field(0.01, title='Early stopping threshold')
# File: autotrain-advanced-main/src/autotrain/trainers/token_classification/utils.py
import os
import numpy as np
from seqeval import metrics
MODEL_CARD = '\n---\ntags:\n- autotrain\n- token-classification{base_model}\nwidget:\n- text: "I love AutoTrain"{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Token Classification\n\n## Validation Metrics\n{validation_metrics}\n'
def token_classification_metrics(pred, label_list):
(predictions, labels) = pred
predictions = np.argmax(predictions, axis=2)
true_predictions = [[label_list[predi] for (predi, lbl) in zip(prediction, label) if lbl != -100] for (prediction, label) in zip(predictions, labels)]
true_labels = [[label_list[lbl] for (predi, lbl) in zip(prediction, label) if lbl != -100] for (prediction, label) in zip(predictions, labels)]
results = {'precision': metrics.precision_score(true_labels, true_predictions), 'recall': metrics.recall_score(true_labels, true_predictions), 'f1': metrics.f1_score(true_labels, true_predictions), 'accuracy': metrics.accuracy_score(true_labels, true_predictions)}
return results
def create_model_card(config, trainer):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
valid_metrics = ['eval_loss', 'eval_precision', 'eval_recall', 'eval_f1', 'eval_accuracy']
eval_scores = [f"{k[len('eval_'):]}: {v}" for (k, v) in eval_scores.items() if k in valid_metrics]
eval_scores = '\n\n'.join(eval_scores)
else:
eval_scores = 'No validation metrics available'
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, validation_metrics=eval_scores, base_model=base_model)
return model_card
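# A small sanity-check sketch with toy logits and labels; -100 marks special tokens that are
# excluded from scoring.
if __name__ == '__main__':
    toy_label_list = ['O', 'B-PER', 'I-PER']
    # shape (batch=1, seq_len=5, num_labels=3); argmax -> [O, B-PER, I-PER, O, O]
    toy_logits = np.eye(3)[[0, 1, 2, 0, 0]][None, :, :]
    toy_labels = np.array([[-100, 1, 2, 0, -100]])
    # -> precision/recall/f1/accuracy of 1.0 on this toy example
    print(token_classification_metrics((toy_logits, toy_labels), toy_label_list))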
# File: autotrain-advanced-main/src/autotrain/trainers/vlm/__main__.py
import argparse
import json
from autotrain.trainers.common import monitor
from autotrain.trainers.vlm import utils
from autotrain.trainers.vlm.params import VLMTrainingParams
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--training_config', type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = VLMTrainingParams(**config)
if not utils.check_model_support(config):
raise ValueError(f'model `{config.model}` not supported')
if config.trainer in ('vqa', 'captioning'):
from autotrain.trainers.vlm.train_vlm_generic import train as train_generic
train_generic(config)
else:
raise ValueError(f'trainer `{config.trainer}` not supported')
if __name__ == '__main__':
_args = parse_args()
with open(_args.training_config, encoding='utf-8') as f:
    training_config = json.load(f)
_config = VLMTrainingParams(**training_config)
train(_config)
# File: autotrain-advanced-main/src/autotrain/trainers/vlm/params.py
from typing import Optional
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class VLMTrainingParams(AutoTrainParams):
model: str = Field('google/paligemma-3b-pt-224', title='Model name')
project_name: str = Field('project-name', title='Output directory')
data_path: str = Field('data', title='Data path')
train_split: str = Field('train', title='Train split')
valid_split: Optional[str] = Field(None, title='Validation split')
trainer: str = Field('vqa', title='Trainer type')
log: str = Field('none', title='Logging using experiment tracking')
disable_gradient_checkpointing: bool = Field(False, title='Disable gradient checkpointing')
logging_steps: int = Field(-1, title='Logging steps')
eval_strategy: str = Field('epoch', title='Evaluation strategy')
save_total_limit: int = Field(1, title='Save total limit')
auto_find_batch_size: bool = Field(False, title='Auto find batch size')
mixed_precision: Optional[str] = Field(None, title='fp16, bf16, or None')
lr: float = Field(3e-05, title='Learning rate')
epochs: int = Field(1, title='Number of training epochs')
batch_size: int = Field(2, title='Training batch size')
warmup_ratio: float = Field(0.1, title='Warmup proportion')
gradient_accumulation: int = Field(4, title='Gradient accumulation steps')
optimizer: str = Field('adamw_torch', title='Optimizer')
scheduler: str = Field('linear', title='Scheduler')
weight_decay: float = Field(0.0, title='Weight decay')
max_grad_norm: float = Field(1.0, title='Max gradient norm')
seed: int = Field(42, title='Seed')
quantization: Optional[str] = Field('int4', title='int4, int8, or None')
target_modules: Optional[str] = Field('all-linear', title='Target modules')
merge_adapter: bool = Field(False, title='Merge adapter')
peft: bool = Field(False, title='Use PEFT')
lora_r: int = Field(16, title='Lora r')
lora_alpha: int = Field(32, title='Lora alpha')
lora_dropout: float = Field(0.05, title='Lora dropout')
image_column: Optional[str] = Field('image', title='Image column')
text_column: str = Field('text', title='Text (answer) column')
prompt_text_column: Optional[str] = Field('prompt', title='Prompt (prefix) column')
push_to_hub: bool = Field(False, title='Push to hub')
username: Optional[str] = Field(None, title='Hugging Face Username')
token: Optional[str] = Field(None, title='Huggingface token')
# File: autotrain-advanced-main/src/autotrain/trainers/vlm/train_vlm_generic.py
from functools import partial
from datasets import load_dataset, load_from_disk
from transformers import AutoProcessor, Trainer, TrainingArguments
from transformers.trainer_callback import PrinterCallback
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE
from autotrain.trainers.vlm import utils
def collate_fn(examples, config, processor):
prompts = ['answer ' + example[config.prompt_text_column] for example in examples]
labels = [example[config.text_column] for example in examples]
images = [example[config.image_column].convert('RGB') for example in examples]
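# Passing the answers via `suffix=` lets the PaliGemma processor build the `labels` tensor itself,
# with the prompt/prefix tokens masked out of the loss.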
tokens = processor(text=prompts, images=images, suffix=labels, return_tensors='pt', padding='longest', tokenize_newline_separately=False)
return tokens
def train(config):
valid_data = None
if config.data_path == f'{config.project_name}/autotrain-data':
train_data = load_from_disk(config.data_path)[config.train_split]
elif ':' in config.train_split:
(dataset_config_name, split) = config.train_split.split(':')
train_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
train_data = load_dataset(config.data_path, split=config.train_split, token=config.token)
if config.valid_split is not None:
if config.data_path == f'{config.project_name}/autotrain-data':
valid_data = load_from_disk(config.data_path)[config.valid_split]
elif ':' in config.valid_split:
(dataset_config_name, split) = config.valid_split.split(':')
valid_data = load_dataset(config.data_path, name=dataset_config_name, split=split, token=config.token)
else:
valid_data = load_dataset(config.data_path, split=config.valid_split, token=config.token)
logger.info(f'Train data: {train_data}')
logger.info(f'Valid data: {valid_data}')
if config.trainer == 'captioning':
config.prompt_text_column = 'caption'
processor = AutoProcessor.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
logging_steps = utils.configure_logging_steps(config, train_data, valid_data)
training_args = utils.configure_training_args(config, logging_steps)
args = TrainingArguments(**training_args)
model = utils.get_model(config)
logger.info('creating trainer')
callbacks = utils.get_callbacks(config)
trainer_args = dict(args=args, model=model, callbacks=callbacks)
col_fn = partial(collate_fn, config=config, processor=processor)
trainer = Trainer(**trainer_args, train_dataset=train_data, eval_dataset=valid_data, data_collator=col_fn)
trainer.remove_callback(PrinterCallback)
trainer.train()
utils.post_training_steps(config, trainer)
# File: autotrain-advanced-main/src/autotrain/trainers/vlm/utils.py
import os
import torch
from accelerate import PartialState
from huggingface_hub import HfApi
from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoConfig, BitsAndBytesConfig, PaliGemmaForConditionalGeneration
from autotrain import logger
from autotrain.trainers.common import ALLOW_REMOTE_CODE, LossLoggingCallback, TrainStartCallback, UploadLogs, pause_space, remove_autotrain_data, save_training_params
TARGET_MODULES = {}
SUPPORTED_MODELS = ['PaliGemmaForConditionalGeneration']
MODEL_CARD = '\n---\ntags:\n- autotrain\n- text-generation-inference\n- image-text-to-text\n- text-generation{peft}\nlibrary_name: transformers{base_model}\nlicense: other{dataset_tag}\n---\n\n# Model Trained Using AutoTrain\n\nThis model was trained using AutoTrain. For more information, please visit [AutoTrain](https://hf.co/docs/autotrain).\n\n# Usage\n\n```python\n# you will need to adjust the code below if you did not use peft\n\nfrom PIL import Image\nfrom transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor\nimport torch\nimport requests\nfrom peft import PeftModel\n\nbase_model_id = BASE_MODEL_ID\npeft_model_id = THIS_MODEL_ID\nmax_new_tokens = 100\ntext = "What is on the flower?"\nimg_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/bee.JPG?download=true"\nimage = Image.open(requests.get(img_url, stream=True).raw)\n\ndevice = torch.device("cuda" if torch.cuda.is_available() else "cpu")\nbase_model = PaliGemmaForConditionalGeneration.from_pretrained(base_model_id)\nprocessor = PaliGemmaProcessor.from_pretrained(base_model_id)\n\nmodel = PeftModel.from_pretrained(base_model, peft_model_id)\nmodel = model.merge_and_unload()\n\nmodel = model.eval().to(device)\n\ninputs = processor(text=text, images=image, return_tensors="pt").to(device)\nwith torch.inference_mode():\n generated_ids = model.generate(\n **inputs,\n max_new_tokens=max_new_tokens,\n do_sample=False,\n )\nresult = processor.batch_decode(generated_ids, skip_special_tokens=True)\nprint(result)\n```\n'
def get_target_modules(config):
if config.target_modules is None:
return TARGET_MODULES.get(config.model)
if config.target_modules.strip() == '':
return TARGET_MODULES.get(config.model)
if config.target_modules.strip().lower() == 'all-linear':
return 'all-linear'
return config.target_modules.split(',')
def create_model_card(config):
if config.peft:
peft = '\n- peft'
else:
peft = ''
if config.data_path == f'{config.project_name}/autotrain-data' or os.path.isdir(config.data_path):
dataset_tag = ''
else:
dataset_tag = f'\ndatasets:\n- {config.data_path}'
if os.path.isdir(config.model):
base_model = ''
else:
base_model = f'\nbase_model: {config.model}'
model_card = MODEL_CARD.format(dataset_tag=dataset_tag, peft=peft, base_model=base_model)
return model_card.strip()
def check_model_support(config):
api = HfApi(token=config.token)
model_info = api.model_info(config.model)
architectures = model_info.config.get('architectures', [])
for arch in architectures:
if arch in SUPPORTED_MODELS:
return True
return False
def configure_logging_steps(config, train_data, valid_data):
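"""Derive logging_steps: when config.logging_steps is -1, use roughly 20% of one epoch's batches
(validation split if available, training split otherwise), clamped to [1, 25]; otherwise return
the configured value unchanged."""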
logger.info('configuring logging steps')
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
if logging_steps > 25:
logging_steps = 25
config.logging_steps = logging_steps
else:
logging_steps = config.logging_steps
logger.info(f'Logging steps: {logging_steps}')
return logging_steps
def configure_training_args(config, logging_steps):
logger.info('configuring training args')
training_args = dict(output_dir=config.project_name, per_device_train_batch_size=config.batch_size, per_device_eval_batch_size=config.batch_size, learning_rate=config.lr, num_train_epochs=config.epochs, eval_strategy=config.eval_strategy if config.valid_split is not None else 'no', logging_steps=logging_steps, save_total_limit=config.save_total_limit, save_strategy=config.eval_strategy if config.valid_split is not None else 'no', gradient_accumulation_steps=config.gradient_accumulation, report_to=config.log, auto_find_batch_size=config.auto_find_batch_size, lr_scheduler_type=config.scheduler, optim=config.optimizer, warmup_ratio=config.warmup_ratio, weight_decay=config.weight_decay, max_grad_norm=config.max_grad_norm, push_to_hub=False, load_best_model_at_end=True if config.valid_split is not None else False, ddp_find_unused_parameters=False, gradient_checkpointing=not config.disable_gradient_checkpointing, remove_unused_columns=False)
if not config.disable_gradient_checkpointing:
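# Quantized (int4/int8) PEFT runs use the re-entrant checkpointing implementation; all other
# configurations use the newer non-re-entrant one.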
if config.peft and config.quantization in ('int4', 'int8'):
training_args['gradient_checkpointing_kwargs'] = {'use_reentrant': True}
else:
training_args['gradient_checkpointing_kwargs'] = {'use_reentrant': False}
if config.mixed_precision == 'fp16':
training_args['fp16'] = True
if config.mixed_precision == 'bf16':
training_args['bf16'] = True
return training_args
def get_callbacks(config):
callbacks = [UploadLogs(config=config), LossLoggingCallback(), TrainStartCallback()]
return callbacks
def get_model(config):
logger.info('loading model config...')
model_config = AutoConfig.from_pretrained(config.model, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE, use_cache=config.disable_gradient_checkpointing)
logger.info('loading model...')
if config.peft:
if config.quantization == 'int4':
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='nf4', bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=False)
elif config.quantization == 'int8':
bnb_config = BitsAndBytesConfig(load_in_8bit=True)
else:
bnb_config = None
model = PaliGemmaForConditionalGeneration.from_pretrained(config.model, config=model_config, token=config.token, quantization_config=bnb_config, trust_remote_code=ALLOW_REMOTE_CODE)
else:
model = PaliGemmaForConditionalGeneration.from_pretrained(config.model, config=model_config, token=config.token, trust_remote_code=ALLOW_REMOTE_CODE)
logger.info(f'model dtype: {model.dtype}')
if config.peft:
logger.info('preparing peft model...')
if config.quantization is not None:
gradient_checkpointing_kwargs = {}
if not config.disable_gradient_checkpointing:
if config.quantization in ('int4', 'int8'):
gradient_checkpointing_kwargs = {'use_reentrant': True}
else:
gradient_checkpointing_kwargs = {'use_reentrant': False}
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=not config.disable_gradient_checkpointing, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs)
else:
model.enable_input_require_grads()
peft_config = LoraConfig(r=config.lora_r, lora_alpha=config.lora_alpha, lora_dropout=config.lora_dropout, bias='none', task_type='CAUSAL_LM', target_modules=get_target_modules(config))
model = get_peft_model(model, peft_config)
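# Keep the vision tower and multi-modal projector frozen (including any adapters injected there)
# so that only the language-model side of PaliGemma is fine-tuned.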
for param in model.vision_tower.parameters():
param.requires_grad = False
for param in model.multi_modal_projector.parameters():
param.requires_grad = False
return model
def merge_adapter(base_model_path, target_model_path, adapter_path):
logger.info('Loading adapter...')
model = PaliGemmaForConditionalGeneration.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=ALLOW_REMOTE_CODE)
model = PeftModel.from_pretrained(model, adapter_path)
model = model.merge_and_unload()
logger.info('Saving target model...')
model.save_pretrained(target_model_path)
def post_training_steps(config, trainer):
logger.info('Finished training, saving model...')
trainer.model.config.use_cache = True
trainer.save_model(config.project_name)
model_card = create_model_card(config)
with open(f'{config.project_name}/README.md', 'w', encoding='utf-8') as f:
f.write(model_card)
if config.peft and config.merge_adapter:
logger.info('Merging adapter weights...')
try:
del trainer
torch.cuda.empty_cache()
merge_adapter(base_model_path=config.model, target_model_path=config.project_name, adapter_path=config.project_name)
for file in os.listdir(config.project_name):
if file.startswith('adapter_'):
os.remove(f'{config.project_name}/{file}')
except Exception as e:
logger.warning(f'Failed to merge adapter weights: {e}')
logger.warning('Skipping adapter merge. Only adapter weights will be saved.')
if config.push_to_hub:
if PartialState().process_index == 0:
remove_autotrain_data(config)
logger.info('Pushing model to hub...')
save_training_params(config)
api = HfApi(token=config.token)
api.create_repo(repo_id=f'{config.username}/{config.project_name}', repo_type='model', private=True, exist_ok=True)
api.upload_folder(folder_path=config.project_name, repo_id=f'{config.username}/{config.project_name}', repo_type='model')
if PartialState().process_index == 0:
pause_space(config)
# File: autotrain-advanced-main/src/autotrain/utils.py
import json
import os
import subprocess
from autotrain.commands import launch_command
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
from autotrain.trainers.generic.params import GenericParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.image_regression.params import ImageRegressionParams
from autotrain.trainers.object_detection.params import ObjectDetectionParams
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.trainers.text_regression.params import TextRegressionParams
from autotrain.trainers.token_classification.params import TokenClassificationParams
from autotrain.trainers.vlm.params import VLMTrainingParams
ALLOW_REMOTE_CODE = os.environ.get('ALLOW_REMOTE_CODE', 'true').lower() == 'true'
def run_training(params, task_id, local=False, wait=False):
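"""Deserialize the params payload (which may be JSON-encoded twice), map task_id to the matching
*Params class, save the resolved config to the project directory, and launch the training command
in a subprocess. Returns the subprocess PID."""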
params = json.loads(params)
if isinstance(params, str):
params = json.loads(params)
if task_id == 9:
params = LLMTrainingParams(**params)
elif task_id == 28:
params = Seq2SeqParams(**params)
elif task_id in (1, 2):
params = TextClassificationParams(**params)
elif task_id in (13, 14, 15, 16, 26):
params = TabularParams(**params)
elif task_id == 27:
params = GenericParams(**params)
elif task_id == 25:
params = DreamBoothTrainingParams(**params)
elif task_id == 18:
params = ImageClassificationParams(**params)
elif task_id == 4:
params = TokenClassificationParams(**params)
elif task_id == 10:
params = TextRegressionParams(**params)
elif task_id == 29:
params = ObjectDetectionParams(**params)
elif task_id == 30:
params = SentenceTransformersParams(**params)
elif task_id == 24:
params = ImageRegressionParams(**params)
elif task_id == 31:
params = VLMTrainingParams(**params)
elif task_id == 5:
params = ExtractiveQuestionAnsweringParams(**params)
else:
raise NotImplementedError
params.save(output_dir=params.project_name)
cmd = launch_command(params=params)
cmd = [str(c) for c in cmd]
env = os.environ.copy()
process = subprocess.Popen(cmd, env=env)
if wait:
process.wait()
return process.pid
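# Usage sketch (illustrative only; task_id values follow the mapping above, and the dataset and
# project names are placeholders):
#   payload = TextRegressionParams(data_path='my-org/my-dataset', project_name='my-project').model_dump_json()
#   pid = run_training(payload, task_id=10, wait=True)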