#!/bin/bash
# Collect every truncated dataset directory per task and compute statistics
# over them with the T5 tokenizer.

BASE_DIR=../../data/seq_truncated
TASKS=("DST" "T2S" "SF" "RRR" "QCR" "NLI" "MRC" "MCQA" "DS" "DCRG" "ER" "ID" "DT" "CC" "CI" "ABSA")
# TASKS=("DT")
TOKENIZER_PATH="../../ckpts/t5-base"
OUTPUT_DIR=../../data

# Gather all dataset directories named "<TASK>-*" under BASE_DIR.
# nullglob makes an unmatched pattern expand to nothing rather than itself,
# matching the old `ls | grep` behavior without parsing ls output.
shopt -s nullglob
DATASETS=()
for TASK in "${TASKS[@]}"; do
    for dataset in "${BASE_DIR}/${TASK}-"*; do
        DATASETS+=("${dataset}")
    done
done

python get_statistics.py \
    --input-dir-list "${DATASETS[@]}" \
    --tokenizer-path "${TOKENIZER_PATH}" \
    --output-path "${OUTPUT_DIR}"
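
# Assumed interface (inferred from the flags above, not verified against
# get_statistics.py): --input-dir-list takes one or more dataset directories,
# --tokenizer-path points at a local tokenizer checkpoint (t5-base here), and
# --output-path is the directory the statistics are written to. To preview
# which directories will be passed in, a dry run such as
#   printf '%s\n' "${DATASETS[@]}"
# placed before the python call prints the collected list.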