#!/bin/bash
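# Shard a Megatron-format LLaMA checkpoint into a tensor/pipeline parallel
# layout using tools/checkpoint_util.py from the multilinguality_megatron repo.
# The defaults below can be overridden via the command-line flags documented
# under --help.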

megatron_model="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/megatron_model"
sharded_model="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/shards"
tp="2"
pp="1"
repo="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron"
vocab_size="37005"
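
# Example invocation (placeholder paths; the script name here is generic):
#   ./script.sh --tp=4 --megatron_model=/path/to/megatron_model --sharded_model=/path/to/shards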

# Parse command-line arguments
for arg in "$@"
do
    case $arg in
        --help)
        echo "Usage: ./script.sh [OPTIONS]"
        echo "Options:"
        echo " --megatron_model=PATH   Path to sharded megatron model"
        echo "  --sharded_model=PATH   Path to save sharded model."
        echo "  --tp=NUMBER            Number of shards to divide model in."
        echo "  --pp=NUMBER            Pipeline parallel (default is 1)"
        echo "  --repo=PATH            Path to repo"
        echo "  --vocab_size=NUMBER    Vocab size of model without padding"
        exit 0
        ;;
        --megatron_model=*)
        megatron_model="${arg#*=}"
        shift
        ;;
        --sharded_model=*)
        sharded_model="${arg#*=}"
        shift
        ;;
        --tp=*)
        tp="${arg#*=}"
        shift
        ;;
        --pp=*)
        pp="${arg#*=}"
        shift
        ;;
        --repo=*)
        repo="${arg#*=}"
        shift
        ;;
        --vocab_size=*)
        vocab_size="${arg#*=}"
        shift
        ;;
    esac
done

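# Reshard the checkpoint: load from $megatron_model and write $tp x $pp shards
# to $sharded_model. --true_vocab_size is the unpadded vocabulary size and
# --bf16 keeps the weights in bfloat16.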
python "$repo/tools/checkpoint_util.py" \
    --target_tensor_parallel_size "$tp" \
    --target_pipeline_parallel_size "$pp" \
    --load_dir "$megatron_model" \
    --save_dir "$sharded_model" \
    --model_type llama \
    --true_vocab_size "$vocab_size" \
    --bf16