"""
Prints out the ratio of activation memory for the MLP layer when using ReLU vs GELU.
"""

import torch
import torch.nn as nn

import act_mem
import layers

if __name__ == "__main__":
    batch_size, seq_len, d_model, dropout_prob = 1, 128, 1024, 0.1
    print(f"Batch size: {batch_size}, sequence length: {seq_len}, d_model: {d_model}, dropout_prob: {dropout_prob}  ")
    dtype = torch.bfloat16
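    # The input requires grad so that the forward pass actually saves activations
    # for backward; without autograd there would be no activation memory to measure.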
    inputs = torch.randn(
        batch_size,
        seq_len,
        d_model,
        device="cuda",
        requires_grad=True,
        dtype=dtype,
    )

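    # ReLU and GELU are measured with the standard MLP below; SiLU is measured with
    # the gated SwiGLU variant, which is how it is typically used in practice.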
    act_fn_dict = {"ReLU": nn.ReLU(), "GELU": nn.GELU(), "SiLU": nn.SiLU()}
    # Append outputs to a list to keep tensors alive
    outputs = []
    mem_bytes = []

    for name, act_fn in act_fn_dict.items():
        if name == "SiLU":
            mlp = layers.SwiGLUMLP(
                d_model=d_model,
                intermediate_size=4 * d_model,
                act_fn=act_fn,
                dropout_prob=dropout_prob,
                device="cuda",
                dtype=dtype,
            )
        else:
            mlp = layers.MLP(
                d_model=d_model,
                act_fn=act_fn,
                dropout_prob=dropout_prob,
                device="cuda",
                dtype=dtype,
            )
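        # AllocatedMemContext tracks the change in allocated CUDA memory across the
        # block, while SavedTensorContext tallies the bytes of tensors saved for
        # backward, ignoring the module's parameters (see act_mem for details).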
        with act_mem.AllocatedMemContext() as mem, act_mem.SavedTensorContext(
            ignored_tensors=mlp.parameters()
        ) as saved:
            out = mlp(inputs)
            outputs.append(out)
        stm = saved.saved_tensor_mem
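        # Sanity check: every byte newly allocated during the forward pass should be
        # accounted for by tensors saved for the backward pass.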
        assert mem.delta["current"] == stm
        print(f"{name} bytes: {act_mem.B_to_GiB(stm)}")
        mem_bytes.append(stm)

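    # ReLU is expected to come out cheaper: in PyTorch its backward only needs the
    # activation output, which the following Linear already saves, whereas GELU's
    # backward also needs the activation input, so the ratio should be below 1.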
    print(f"ReLU/GELU act mem ratio: {mem_bytes[0]/mem_bytes[1]}")