#!/bin/bash
# Default paths; each can be overridden with the command-line flags parsed below.
dataset_json="/mnt/scratch-artemis/kshitij/oneB_experiment/new_data_wout_covost/combined/to_tokenize.jsonl"
dataset_bin="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b"
vocab_file="/mnt/scratch-artemis/kshitij/LLAMA/Megatron_LLM/temp/new_tokenizer/tokenizer.model"
repo="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron"
# Parse command-line arguments
for arg in "$@"
do
    case $arg in
        --help)
            echo "Usage: ./script.sh [OPTIONS]"
            echo "Options:"
            echo "  --dataset_json=PATH  Path to the dataset .jsonl file to tokenize."
            echo "  --dataset_bin=PATH   Path to the directory where preprocessed data is saved."
            echo "  --vocab_file=PATH    Path to the tokenizer.model file of the HF model to be trained."
            echo "  --repo=PATH          Path to the multilinguality_megatron repo."
            exit 0
            ;;
        --dataset_json=*)
            dataset_json="${arg#*=}"
            ;;
        --dataset_bin=*)
            dataset_bin="${arg#*=}"
            ;;
        --vocab_file=*)
            vocab_file="${arg#*=}"
            ;;
        --repo=*)
            repo="${arg#*=}"
            ;;
    esac
done
echo "Using repo: $repo"
mkdir -p "$dataset_bin"
# Tokenize the JSONL dataset with the SentencePiece tokenizer and write
# Megatron binary/index files under $dataset_bin with the prefix "data".
python "$repo"/tools/preprocess_data.py \
    --input="$dataset_json" \
    --output_prefix="$dataset_bin"/data \
    --tokenizer_type=SentencePieceTokenizer \
    --vocab_file="$vocab_file" \
    --chunk_size=64 \
    --workers=64 \
    --append_eod \
    --vocab_extra_ids 5000
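
# Illustrative example invocation (the paths below are placeholders, not real files),
# using the flag names advertised by --help above:
#
#   ./script.sh \
#     --dataset_json=/path/to/to_tokenize.jsonl \
#     --dataset_bin=/path/to/output_dir \
#     --vocab_file=/path/to/tokenizer.model \
#     --repo=/path/to/multilinguality_megatron
#
# On success, preprocess_data.py should leave Megatron-style .bin/.idx files
# prefixed with "data" inside the directory given by --dataset_bin.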