""" Finetuning the library models for sequence classification."""

import logging
import os
import sys
from dataclasses import dataclass

import datasets
import numpy as np

import transformers
from transformers import (
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from shared import (
    CATEGORIES,
    DatasetArguments,
    prepare_datasets,
    load_datasets,
    CustomTrainingArguments,
    train_from_checkpoint,
    get_last_checkpoint
)
from model import get_model_tokenizer, ModelArguments

# Will raise an error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('4.17.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r requirements.txt')

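# Disable Weights & Biases reporting for this script; remove this line to log runs to W&B.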
os.environ['WANDB_DISABLED'] = 'true'

logger = logging.getLogger(__name__)


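# Merges the project-specific flags from CustomTrainingArguments (shared.py) with
# the stock Hugging Face TrainingArguments, so HfArgumentParser exposes both sets
# of fields on the command line.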
@dataclass
class ClassifierTrainingArguments(CustomTrainingArguments, TrainingArguments):
    pass


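# Aliases the classifier-specific file options (c_train_file, c_validation_file,
# c_test_file) onto the generic train/validation/test_file fields that the shared
# dataset helpers read.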
@dataclass
class ClassifierDatasetArguments(DatasetArguments):
    def __post_init__(self):
        self.train_file = self.c_train_file
        self.validation_file = self.c_validation_file
        self.test_file = self.c_test_file


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    hf_parser = HfArgumentParser((
        ModelArguments,
        ClassifierDatasetArguments,
        ClassifierTrainingArguments
    ))
    model_args, dataset_args, training_args = hf_parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # Detecting last checkpoint.
    last_checkpoint = get_last_checkpoint(training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Loading a dataset from your local files.
    # CSV/JSON training and evaluation files are needed.
    raw_datasets = load_datasets(dataset_args)

    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    config_args = {
        'num_labels': len(CATEGORIES),
        'id2label': {k: str(v).upper() for k, v in enumerate(CATEGORIES)},
        'label2id': {str(v).upper(): k for k, v in enumerate(CATEGORIES)}
    }
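    # Illustrative mapping, assuming a hypothetical CATEGORIES = ['sponsor', 'selfpromo']:
    # id2label == {0: 'SPONSOR', 1: 'SELFPROMO'}, and label2id is its inverse.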
    model, tokenizer = get_model_tokenizer(
        model_args, training_args, config_args=config_args, model_type='classifier')

    if training_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'The max_seq_length passed ({training_args.max_seq_length}) is larger than the maximum length for the '
            f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
        )
    max_seq_length = min(training_args.max_seq_length,
                         tokenizer.model_max_length)

    def preprocess_function(examples):
        # Tokenize the texts
        result = tokenizer(
            examples['text'], padding='max_length', max_length=max_seq_length, truncation=True)
        result['label'] = examples['label']
        return result
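    # Illustrative shape of the mapping (values are hypothetical): a batch like
    # {'text': ['some sentence'], 'label': [0]} comes back as
    # {'input_ids': [[...]], 'attention_mask': [[...]], 'label': [0]}, already
    # padded/truncated to max_seq_length (BERT-like tokenizers also add token_type_ids).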

    train_dataset, eval_dataset, predict_dataset = prepare_datasets(
        raw_datasets, dataset_args, training_args, preprocess_function)

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping metric names (str) to floats.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(
            p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {'accuracy': (preds == p.label_ids).astype(np.float32).mean().item()}
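    # Worked example (hypothetical logits for a 2-label model):
    # np.argmax([[0.1, 0.9], [0.8, 0.2]], axis=1) gives [1, 0]; against
    # label_ids == [1, 1], that is an accuracy of 0.5.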

    # Data collator: the Trainer defaults to DataCollatorWithPadding when a tokenizer is passed, which is harmless
    # here because preprocessing already pads everything to max_length. Under fp16 we explicitly pad to a multiple
    # of 8 so tensor cores can be used efficiently.
    if training_args.fp16:
        data_collator = DataCollatorWithPadding(
            tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    train_result = train_from_checkpoint(
        trainer, last_checkpoint, training_args)

    metrics = train_result.metrics
    max_train_samples = (
        training_args.max_train_samples
        if training_args.max_train_samples is not None
        else len(train_dataset)
    )
    metrics['train_samples'] = min(max_train_samples, len(train_dataset))

    trainer.save_model()  # Saves the tokenizer too for easy upload

    trainer.log_metrics('train', metrics)
    trainer.save_metrics('train', metrics)
    trainer.save_state()

    kwargs = {'finetuned_from': model_args.model_name_or_path,
              'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == '__main__':
    main()