dh-mc committed
Commit 07320d0 · 1 Parent(s): 54b1b8a
.env.example ADDED
@@ -0,0 +1,11 @@
+MODEL_NAME=internlm/internlm2_5-7b-chat-1m
+
+OPENAI_API_KEY=
+HF_TOKEN=
+WANDB_API_KEY=
+
+LOAD_IN_4BIT=false
+NUM_TRAIN_EPOCHS=3
+
+DATA_PATH=datasets/mac
+RESULTS_PATH=results/mac-results.csv
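For reference, llm_toolkit/eval.py (diffed below) reads these settings through python-dotenv (note the found_dotenv reference) and os.getenv. A minimal sketch of that loading pattern, assuming only the variable names from this .env.example; the inline comments just echo the example values:

import os
from dotenv import find_dotenv, load_dotenv

# Copy .env.example to .env, fill in the keys, then locate and load it.
found_dotenv = find_dotenv(".env")
load_dotenv(found_dotenv, override=False)

model_name = os.getenv("MODEL_NAME")                         # internlm/internlm2_5-7b-chat-1m in .env.example
load_in_4bit = os.getenv("LOAD_IN_4BIT", "false") == "true"  # false in .env.example
num_train_epochs = int(os.getenv("NUM_TRAIN_EPOCHS", "3"))   # 3 in .env.example
data_path = os.getenv("DATA_PATH")                           # datasets/mac
results_path = os.getenv("RESULTS_PATH")                     # results/mac-results.csv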
llm_toolkit/eval.py CHANGED
@@ -14,7 +14,7 @@ path = os.path.dirname(found_dotenv)
 print(f"Adding {path} to sys.path")
 sys.path.append(path)
 
-from llm_toolkit.translation_engine import *
+from llm_toolkit.llm_utils import *
 from llm_toolkit.translation_utils import *
 
 model_name = os.getenv("MODEL_NAME")
@@ -43,6 +43,14 @@ print(f"{start_gpu_memory} GB of memory reserved.")
 
 datasets = load_translation_dataset(data_path, tokenizer)
 
+if len(sys.argv) > 1:
+    num = int(sys.argv[1])
+    if num > 0:
+        print(f"--- evaluating {num} entries")
+        datasets["test"] = datasets["test"].select(range(num))
+
+print_row_details(datasets["test"].to_pandas(), indices=[0, -1])
+
 print("Evaluating model: " + model_name)
 predictions = eval_model(model, tokenizer, datasets["test"])
 
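With the added argument handling, the evaluation size can be capped from the command line: for example, python llm_toolkit/eval.py 10 evaluates only the first 10 entries of the test split, while running the script without an argument keeps the full set. print_row_details comes from the new llm_toolkit.llm_utils import and is not shown in this commit; a hypothetical sketch of such a helper, inferred only from the call site above:

import pandas as pd

def print_row_details(df: pd.DataFrame, indices=None):
    # Hypothetical helper: print the selected rows column by column for a quick sanity check.
    for i in indices or [0]:
        print("-" * 40)
        for col in df.columns:
            print(f"{col}: {df.iloc[i][col]}")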
scripts/eval-mac.sh ADDED
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+BASEDIR=$(dirname "$0")
+cd $BASEDIR/..
+echo Current Directory:
+pwd
+
+nvidia-smi
+uname -a
+cat /etc/os-release
+lscpu
+grep MemTotal /proc/meminfo
+
+export EVAL_BASE_MODEL=true
+export DO_FINE_TUNING=false
+
+export MODEL_NAME=$1
+echo Evaluating $MODEL_NAME
+python llm_toolkit/tune_mac.py
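The script takes the model to evaluate as its first argument, e.g. scripts/eval-mac.sh internlm/internlm2_5-7b-chat-1m (the model listed in .env.example). It logs the GPU, OS, CPU, and memory details of the host, then runs llm_toolkit/tune_mac.py with EVAL_BASE_MODEL=true and DO_FINE_TUNING=false, which, judging by the variable names, evaluates the base model without fine-tuning.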
scripts/install-cuda-torch.sh ADDED
@@ -0,0 +1,3 @@
+wget https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_550.54.14_linux.run && \
+sudo sh cuda_12.4.0_550.54.14_linux.run && \
+pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124
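After the CUDA 12.4 installer and the cu124 PyTorch wheels finish, a quick sanity check (not part of this commit) confirms that the installed torch build actually sees the GPU:

import torch

print("torch:", torch.__version__)                  # expect a +cu124 build
print("CUDA available:", torch.cuda.is_available())
print("CUDA runtime:", torch.version.cuda)          # expect "12.4"
if torch.cuda.is_available():
    print("device:", torch.cuda.get_device_name(0))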