#!/bin/bash
#SBATCH --ntasks=1 # number of MP tasks
#SBATCH --nodes=1
#SBATCH --cpus-per-task=64 # number of cores per task
#SBATCH --hint=nomultithread # we get physical cores not logical
#SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
#SBATCH --output=%x-%j.out # output file name
#SBATCH --account=project_462000119
#SBATCH --partition=small
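# print commands as they run and exit on the first error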
set -x -e
#source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
source /scratch/project_462000119/muennighoff/nov-2022-bettercom/venv/bin/activate
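# stay fully offline; datasets and tokenizer must already be in the local HF cache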
export HF_DATASETS_OFFLINE=1
export TRANSFORMERS_OFFLINE=1
#MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
MEGATRON_DEEPSPEED_REPO=/scratch/project_462000119/muennighoff/nov-2022-mtf/Megatron-DeepSpeed
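# BLOOM tokenizer, resolved from the local HF cache since offline mode is on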
TOKENIZER_PATH="bigscience/tokenizer"
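# full xP3 language list (incl. the code split), kept for reference; overridden below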
LANGS=(
ak
ar
as
bm
bn
ca
code
en
es
eu
fon
fr
gu
hi
id
ig
ki
kn
lg
ln
ml
mr
ne
nso
ny
or
pa
pt
rn
rw
sn
st
sw
ta
te
tn
ts
tum
tw
ur
vi
wo
xh
yo
zh
zu
)
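# override: this run only processes Russian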
LANGS=(
ru
)
#DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/xp3cappedmixednewcodelong
#OUTPUT=/gpfswork/rech/six/commun/bigscience-training/xp3cappedmixednewcodelong
DATA_PATH=/scratch/project_462000119/muennighoff/nov-2022-mtf/xp3ru/ru
OUTPUT=/scratch/project_462000119/muennighoff/nov-2022-mtf/xp3rumegds
mkdir -p $OUTPUT
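# for each language: merge shards, deduplicate, shuffle, then binarize inputs and targets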
for LANG in "${LANGS[@]}"; do
cd $DATA_PATH
# Merge all shards into one file
# (caveat: on a rerun the *.jsonl glob also matches the previously merged files)
cat *.jsonl > merged_dups_$LANG.jsonl
# Drop exact duplicate lines (removes ~1G of the 37G English split), then shuffle
sort -u merged_dups_$LANG.jsonl | shuf > merged_$LANG.jsonl
cd $MEGATRON_DEEPSPEED_REPO
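# binarize the inputs field; preprocess_data.py appends the json key to the prefix,
# so output lands at $OUTPUT/xp3_${LANG}_inputs_*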
python tools/preprocess_data.py \
--input $DATA_PATH/merged_$LANG.jsonl \
--output-prefix $OUTPUT/xp3_$LANG \
--dataset-impl mmap \
--json-key inputs \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path $TOKENIZER_PATH \
--workers 60
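# binarize the targets field; EOD is appended and a space prepended so each target
# tokenizes as a continuation of its paired input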
python tools/preprocess_data.py \
--input $DATA_PATH/merged_$LANG.jsonl \
--output-prefix $OUTPUT/xp3_$LANG \
--dataset-impl mmap \
--json-key targets \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path $TOKENIZER_PATH \
--append-eod \
--prepend-space \
--workers 60
done