File size: 5,782 Bytes
89bc030
 
 
 
 
 
 
 
 
 
 
 
ebfb4b3
89bc030
2ebe38b
89bc030
 
 
ebfb4b3
89bc030
 
 
ebfb4b3
89bc030
 
 
 
ebfb4b3
89bc030
 
 
ebfb4b3
89bc030
 
ebfb4b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89bc030
 
 
ebfb4b3
2178182
89bc030
 
 
 
 
 
 
 
 
 
 
 
ebfb4b3
89bc030
bc74a1c
 
 
89bc030
 
 
 
 
 
 
 
 
c03487d
 
 
 
 
 
 
 
 
 
ebfb4b3
 
 
 
 
 
 
 
 
 
 
 
 
 
c03487d
 
 
 
ebfb4b3
 
 
 
 
 
 
 
89bc030
 
 
ebfb4b3
89bc030
ebfb4b3
bc74a1c
ebfb4b3
89bc030
ebfb4b3
89bc030
 
2b544b3
89bc030
 
 
 
2178182
ebfb4b3
2b544b3
 
89bc030
 
bc74a1c
2178182
89bc030
ebfb4b3
 
 
 
 
 
 
 
 
bc74a1c
ebfb4b3
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
import datasets
import pickle

_DESCRIPTION = """\
    Dataset for storing training metrics of pythia models 
"""

class PythiaTrainingMetrics(datasets.GeneratorBasedBuilder):
    
    MODEL_SIZES = [ 
        "70m", 
        "160m", 
        "410m",
        "1.4b",
        "2.8b",
    ]

    _GRADIENTS_DESCRIPTION = """\
        Dataset for storing gradients of pythia models of the requested model size
    """

    _WEIGHTS_DESCRIPTION = """\
        Dataset for storing weights of pythia models  of the requested model size
    """

    _WEIGHTS_MINI_DESCRIPTION = """\
        Dataset for storing weights of pythia models (minimizes the amount of gradients per 
        checkpoint to only 2) of the requested model size
    """

    _ACTIVATIONS_DESCRIPTION = """\
        Dataset for storing activations of pythia models of the requested model size
    """
   
    BUILDER_CONFIGS = []
    for model_size in MODEL_SIZES:
        BUILDER_CONFIGS.extend([
            datasets.BuilderConfig(
                name=f"{model_size}__gradients",
                description=_WEIGHTS_DESCRIPTION,
                version="1.0.0",
            ),
            datasets.BuilderConfig(
                name=f"{model_size}__gradients_mini",
                description=_WEIGHTS_MINI_DESCRIPTION,
                version="1.0.0",
            ),
            datasets.BuilderConfig(
                name=f"{model_size}__activations",
                description=_ACTIVATIONS_DESCRIPTION,
                version="1.0.0",
            ),
            datasets.BuilderConfig(
                name=f"{model_size}__weights",
                description=_WEIGHTS_DESCRIPTION,
                version="1.0.0",
            ),
        ])

    def _info(self):
        """
        NOTE: we might want to specify features, but since the features are different for each
        model size it's annoying and kind of pointless since hf does it automatically 
        """

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
        )


    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """ 
        Returns data for different splits - we define a split as a model size. 
        """

        to_download_files = []

        kwargs_checkpoint_steps = []
        kwargs_gradient_steps = [] 

        checkpoint_steps = [0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1000, ]
        checkpoint_steps.extend([3000 + (i * 10000) for i in range(0, 15)])

        def get_gradient_step(step: int): 
            """
            Return a list of the gradient steps that are used at a given checkpoint step. 
            """
            return list(range(max(0, step-5), min(step+6, 143_000))) 

        def get_gradient_mini_step(step: int):
            """
            Return a list of the gradient steps that are used at a given checkpoint step, we 
            limit the amount of gradients to only 2. 
            """
            if step != checkpoint_steps[-1]:
                return [step, step+1]
            else:
                return [step-2, step-1]

        model_size = self.config.name.split("__")[0]

        for checkpoint_step in checkpoint_steps:

            directory_path = f"./models/{model_size}/checkpoint_{checkpoint_step}"

            if "activations" in self.config.name:
                to_download_files.append(f"{directory_path}/checkpoint_activations.pickle")
                kwargs_checkpoint_steps.append(checkpoint_step)
            elif "weights" in self.config.name:
                to_download_files.append(f"{directory_path}/checkpoint_weights.pickle")
                kwargs_checkpoint_steps.append(checkpoint_step)
            elif "gradients" in self.config.name:
                if "mini" in self.config.name:
                    gradient_steps = get_gradient_mini_step(checkpoint_step)
                else:
                    gradient_steps = get_gradient_step(checkpoint_step)

                for gradient_step in gradient_steps:
                    to_download_files.append(f"{directory_path}/checkpoint_gradients_{gradient_step}.pickle")
                    kwargs_checkpoint_steps.append(checkpoint_step)
                    kwargs_gradient_steps.append(gradient_step)
            else: 
                raise Exception("Invalid config name")

            downloaded_files = dl_manager.download_and_extract(to_download_files)

        return [
            datasets.SplitGenerator(
                name='default',
                gen_kwargs={
                    "filepaths": downloaded_files,
                    "checkpoint_steps": kwargs_checkpoint_steps,
                    **({"gradient_steps": kwargs_gradient_steps} if "gradients" in self.config.name else {}),
                }
            )  
        ]

    def _generate_examples(self, filepaths, checkpoint_steps, **kwargs):

        # the filepaths should be a list of filepaths 
        if isinstance(filepaths, str):
            filepaths = [filepaths]

        if "gradients" in self.config.name:
            gradient_steps = kwargs["gradient_steps"]

        global_idx = 0 # the unique identifier for the example 

        for idx, filepath in enumerate(filepaths):
            with open(filepath, 'rb') as f:
                data = pickle.load(f)

                for layer_name, layer_data in data.items():
                    record = {
                        "checkpoint_step": checkpoint_steps[idx],
                        "layer_name": layer_name,
                        "data": layer_data, 
                    }
                    if "gradients" in self.config.name:
                        record['gradient_step'] = gradient_steps[idx]
               
                    yield global_idx,  record
                    global_idx += 1