inflaton committed
Commit 62df289 · 1 Parent(s): bbea107

qwen2 72b 80% results

llama-factory/saves/Llama3.1-70B-Chinese-Chat/trainer_log.jsonl CHANGED
@@ -45,3 +45,37 @@
45
  {"current_steps": 200, "total_steps": 350, "loss": 0.2332, "learning_rate": 4.626349532067879e-05, "epoch": 1.1371712864250179, "percentage": 57.14, "elapsed_time": "9:49:13", "remaining_time": "7:21:54", "throughput": "0.00", "total_tokens": 0}
46
  {"current_steps": 205, "total_steps": 350, "loss": 0.2216, "learning_rate": 4.378281476762576e-05, "epoch": 1.1656005685856432, "percentage": 58.57, "elapsed_time": "10:00:34", "remaining_time": "7:04:47", "throughput": "0.00", "total_tokens": 0}
47
  {"current_steps": 210, "total_steps": 350, "loss": 0.2079, "learning_rate": 4.131759111665349e-05, "epoch": 1.1940298507462686, "percentage": 60.0, "elapsed_time": "10:11:55", "remaining_time": "6:47:56", "throughput": "0.00", "total_tokens": 0}
48
+ {"current_steps": 210, "total_steps": 350, "eval_loss": 0.22092726826667786, "epoch": 1.1940298507462686, "percentage": 60.0, "elapsed_time": "10:38:36", "remaining_time": "7:05:44", "throughput": "0.00", "total_tokens": 0}
49
+ {"current_steps": 215, "total_steps": 350, "loss": 0.2088, "learning_rate": 3.887395330218429e-05, "epoch": 1.2224591329068941, "percentage": 61.43, "elapsed_time": "10:50:03", "remaining_time": "6:48:10", "throughput": "0.00", "total_tokens": 0}
50
+ {"current_steps": 220, "total_steps": 350, "loss": 0.1963, "learning_rate": 3.6457976592849754e-05, "epoch": 1.2508884150675195, "percentage": 62.86, "elapsed_time": "11:01:27", "remaining_time": "6:30:51", "throughput": "0.00", "total_tokens": 0}
51
+ {"current_steps": 225, "total_steps": 350, "loss": 0.2277, "learning_rate": 3.4075667487415785e-05, "epoch": 1.279317697228145, "percentage": 64.29, "elapsed_time": "11:12:49", "remaining_time": "6:13:47", "throughput": "0.00", "total_tokens": 0}
52
+ {"current_steps": 230, "total_steps": 350, "loss": 0.2113, "learning_rate": 3.173294878168025e-05, "epoch": 1.3077469793887704, "percentage": 65.71, "elapsed_time": "11:24:11", "remaining_time": "5:56:57", "throughput": "0.00", "total_tokens": 0}
53
+ {"current_steps": 235, "total_steps": 350, "loss": 0.2135, "learning_rate": 2.9435644843469436e-05, "epoch": 1.336176261549396, "percentage": 67.14, "elapsed_time": "11:35:36", "remaining_time": "5:40:24", "throughput": "0.00", "total_tokens": 0}
54
+ {"current_steps": 240, "total_steps": 350, "loss": 0.2165, "learning_rate": 2.718946713234185e-05, "epoch": 1.3646055437100213, "percentage": 68.57, "elapsed_time": "11:46:59", "remaining_time": "5:24:02", "throughput": "0.00", "total_tokens": 0}
55
+ {"current_steps": 245, "total_steps": 350, "loss": 0.2293, "learning_rate": 2.500000000000001e-05, "epoch": 1.3930348258706466, "percentage": 70.0, "elapsed_time": "11:58:21", "remaining_time": "5:07:52", "throughput": "0.00", "total_tokens": 0}
56
+ {"current_steps": 245, "total_steps": 350, "eval_loss": 0.21432924270629883, "epoch": 1.3930348258706466, "percentage": 70.0, "elapsed_time": "12:25:04", "remaining_time": "5:19:18", "throughput": "0.00", "total_tokens": 0}
57
+ {"current_steps": 250, "total_steps": 350, "loss": 0.2125, "learning_rate": 2.2872686806712035e-05, "epoch": 1.4214641080312722, "percentage": 71.43, "elapsed_time": "12:36:33", "remaining_time": "5:02:37", "throughput": "0.00", "total_tokens": 0}
58
+ {"current_steps": 255, "total_steps": 350, "loss": 0.2045, "learning_rate": 2.0812816388260518e-05, "epoch": 1.4498933901918978, "percentage": 72.86, "elapsed_time": "12:47:54", "remaining_time": "4:46:04", "throughput": "0.00", "total_tokens": 0}
59
+ {"current_steps": 260, "total_steps": 350, "loss": 0.1835, "learning_rate": 1.8825509907063327e-05, "epoch": 1.4783226723525231, "percentage": 74.29, "elapsed_time": "12:59:16", "remaining_time": "4:29:45", "throughput": "0.00", "total_tokens": 0}
60
+ {"current_steps": 265, "total_steps": 350, "loss": 0.2045, "learning_rate": 1.691570812015704e-05, "epoch": 1.5067519545131485, "percentage": 75.71, "elapsed_time": "13:10:45", "remaining_time": "4:13:38", "throughput": "0.00", "total_tokens": 0}
61
+ {"current_steps": 270, "total_steps": 350, "loss": 0.2137, "learning_rate": 1.5088159095696363e-05, "epoch": 1.535181236673774, "percentage": 77.14, "elapsed_time": "13:22:08", "remaining_time": "3:57:40", "throughput": "0.00", "total_tokens": 0}
62
+ {"current_steps": 275, "total_steps": 350, "loss": 0.1994, "learning_rate": 1.3347406408508695e-05, "epoch": 1.5636105188343994, "percentage": 78.57, "elapsed_time": "13:33:30", "remaining_time": "3:41:52", "throughput": "0.00", "total_tokens": 0}
63
+ {"current_steps": 280, "total_steps": 350, "loss": 0.2129, "learning_rate": 1.1697777844051105e-05, "epoch": 1.5920398009950247, "percentage": 80.0, "elapsed_time": "13:44:53", "remaining_time": "3:26:13", "throughput": "0.00", "total_tokens": 0}
64
+ {"current_steps": 280, "total_steps": 350, "eval_loss": 0.2167833000421524, "epoch": 1.5920398009950247, "percentage": 80.0, "elapsed_time": "14:11:37", "remaining_time": "3:32:54", "throughput": "0.00", "total_tokens": 0}
65
+ {"current_steps": 285, "total_steps": 350, "loss": 0.2216, "learning_rate": 1.0143374638853891e-05, "epoch": 1.6204690831556503, "percentage": 81.43, "elapsed_time": "14:23:09", "remaining_time": "3:16:51", "throughput": "0.00", "total_tokens": 0}
66
+ {"current_steps": 290, "total_steps": 350, "loss": 0.2125, "learning_rate": 8.688061284200266e-06, "epoch": 1.6488983653162759, "percentage": 82.86, "elapsed_time": "14:34:30", "remaining_time": "3:00:55", "throughput": "0.00", "total_tokens": 0}
67
+ {"current_steps": 295, "total_steps": 350, "loss": 0.1947, "learning_rate": 7.33545591839222e-06, "epoch": 1.6773276474769012, "percentage": 84.29, "elapsed_time": "14:45:53", "remaining_time": "2:45:09", "throughput": "0.00", "total_tokens": 0}
68
+ {"current_steps": 300, "total_steps": 350, "loss": 0.2093, "learning_rate": 6.088921331488568e-06, "epoch": 1.7057569296375266, "percentage": 85.71, "elapsed_time": "14:57:18", "remaining_time": "2:29:33", "throughput": "0.00", "total_tokens": 0}
69
+ {"current_steps": 305, "total_steps": 350, "loss": 0.2002, "learning_rate": 4.951556604879048e-06, "epoch": 1.7341862117981521, "percentage": 87.14, "elapsed_time": "15:08:39", "remaining_time": "2:14:03", "throughput": "0.00", "total_tokens": 0}
70
+ {"current_steps": 310, "total_steps": 350, "loss": 0.1749, "learning_rate": 3.9261894064796135e-06, "epoch": 1.7626154939587777, "percentage": 88.57, "elapsed_time": "15:20:06", "remaining_time": "1:58:43", "throughput": "0.00", "total_tokens": 0}
71
+ {"current_steps": 315, "total_steps": 350, "loss": 0.1974, "learning_rate": 3.0153689607045845e-06, "epoch": 1.7910447761194028, "percentage": 90.0, "elapsed_time": "15:31:29", "remaining_time": "1:43:29", "throughput": "0.00", "total_tokens": 0}
72
+ {"current_steps": 315, "total_steps": 350, "eval_loss": 0.21316008269786835, "epoch": 1.7910447761194028, "percentage": 90.0, "elapsed_time": "15:58:13", "remaining_time": "1:46:28", "throughput": "0.00", "total_tokens": 0}
73
+ {"current_steps": 320, "total_steps": 350, "loss": 0.2004, "learning_rate": 2.221359710692961e-06, "epoch": 1.8194740582800284, "percentage": 91.43, "elapsed_time": "16:09:42", "remaining_time": "1:30:54", "throughput": "0.00", "total_tokens": 0}
74
+ {"current_steps": 325, "total_steps": 350, "loss": 0.207, "learning_rate": 1.5461356885461075e-06, "epoch": 1.847903340440654, "percentage": 92.86, "elapsed_time": "16:21:03", "remaining_time": "1:15:27", "throughput": "0.00", "total_tokens": 0}
75
+ {"current_steps": 330, "total_steps": 350, "loss": 0.1933, "learning_rate": 9.913756075728087e-07, "epoch": 1.8763326226012793, "percentage": 94.29, "elapsed_time": "16:32:25", "remaining_time": "1:00:08", "throughput": "0.00", "total_tokens": 0}
76
+ {"current_steps": 335, "total_steps": 350, "loss": 0.2096, "learning_rate": 5.584586887435739e-07, "epoch": 1.9047619047619047, "percentage": 95.71, "elapsed_time": "16:43:51", "remaining_time": "0:44:56", "throughput": "0.00", "total_tokens": 0}
77
+ {"current_steps": 340, "total_steps": 350, "loss": 0.1997, "learning_rate": 2.4846123172992954e-07, "epoch": 1.9331911869225302, "percentage": 97.14, "elapsed_time": "16:55:17", "remaining_time": "0:29:51", "throughput": "0.00", "total_tokens": 0}
78
+ {"current_steps": 345, "total_steps": 350, "loss": 0.2084, "learning_rate": 6.215393905388278e-08, "epoch": 1.9616204690831558, "percentage": 98.57, "elapsed_time": "17:06:40", "remaining_time": "0:14:52", "throughput": "0.00", "total_tokens": 0}
79
+ {"current_steps": 350, "total_steps": 350, "loss": 0.2068, "learning_rate": 0.0, "epoch": 1.9900497512437811, "percentage": 100.0, "elapsed_time": "17:18:06", "remaining_time": "0:00:00", "throughput": "0.00", "total_tokens": 0}
80
+ {"current_steps": 350, "total_steps": 350, "eval_loss": 0.2134595662355423, "epoch": 1.9900497512437811, "percentage": 100.0, "elapsed_time": "17:44:51", "remaining_time": "0:00:00", "throughput": "0.00", "total_tokens": 0}
81
+ {"current_steps": 350, "total_steps": 350, "epoch": 1.9900497512437811, "percentage": 100.0, "elapsed_time": "17:44:54", "remaining_time": "0:00:00", "throughput": "0.00", "total_tokens": 0}
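The trainer_log.jsonl shown above is one JSON object per line: training steps carry a `loss` field, while the periodic evaluation entries carry `eval_loss` instead. A minimal sketch for splitting the two kinds of entries, assuming the repository-relative path from the diff header (the script itself is illustrative, not part of the repo):

```python
import json
from pathlib import Path

# Path as shown in the diff header above (adjust to your checkout).
log_path = Path("llama-factory/saves/Llama3.1-70B-Chinese-Chat/trainer_log.jsonl")

train_entries, eval_entries = [], []
with log_path.open(encoding="utf-8") as f:
    for line in f:
        entry = json.loads(line)
        if "eval_loss" in entry:
            eval_entries.append((entry["current_steps"], entry["eval_loss"]))
        elif "loss" in entry:
            train_entries.append((entry["current_steps"], entry["loss"]))

print(f"{len(train_entries)} training entries, {len(eval_entries)} eval entries")
print("last eval:", eval_entries[-1] if eval_entries else None)
```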
llama-factory/saves/Llama3.1-8B-Chinese-Chat DELETED
@@ -1 +0,0 @@
1
- llama3.1_8b/lora/sft_p2
 
 
logs/Qwen2-72B-Instruct_epoch_0.txt CHANGED
@@ -2,162 +2,10 @@ loading env vars from: /common/home/users/d/dh.huang.2023/common2/code/logical-r
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct None True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
- (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: None
8
- 09/08/2024 11:25:43 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
- 09/08/2024 11:25:43 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
- 09/08/2024 11:25:43 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
- 09/08/2024 11:25:43 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
- 09/08/2024 11:38:12 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
- 09/08/2024 11:38:12 - INFO - llamafactory.model.loader - all params: 72,706,203,648
14
- (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
15
- 42.426 GB of memory reserved.
16
- loading train/test data files
17
- DatasetDict({
18
- train: Dataset({
19
- features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],
20
- num_rows: 25000
21
- })
22
- test: Dataset({
23
- features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],
24
- num_rows: 3000
25
- })
26
- })
27
- --------------------------------------------------
28
- text: 甄加索是自杀吗
29
- --------------------------------------------------
30
- label: 不是
31
- --------------------------------------------------
32
- answer: nan
33
- --------------------------------------------------
34
- title: 海岸之谜
35
- --------------------------------------------------
36
- puzzle: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?
37
- --------------------------------------------------
38
- truth: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。
39
- --------------------------------------------------
40
- train_text: <|im_start|>system
41
- You are an expert in logical reasoning.<|im_end|>
42
- <|im_start|>user
43
- 你是一个情景猜谜游戏的主持人。游戏规则如下:
44
-
45
- 1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
46
- 2. 主持人知道谜底,谜底是谜面的答案。
47
- 3. 参与者可以询问任何封闭式问题来找寻事件的真相。
48
- 4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
49
- - 若谜面和谜底能找到问题的答案,回答:是或者不是
50
- - 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
51
- - 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
52
- - 若参与者提问基本还原了谜底真相,回答:回答正确
53
- 5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。
54
-
55
- 请严格按照这些规则回答参与者提出的问题。
56
-
57
- **谜面:** 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?
58
-
59
- **谜底:** 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。
60
-
61
- **参与者提出的问题:** 甄加索是自杀吗
62
- <|im_end|>
63
- <|im_start|>assistant
64
- 不是<|im_end|>
65
- --------------------------------------------------
66
- prompt: <|im_start|>system
67
- You are an expert in logical reasoning.<|im_end|>
68
- <|im_start|>user
69
- 你是一个情景猜谜游戏的主持人。游戏规则如下:
70
-
71
- 1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
72
- 2. 主持人知道谜底,谜底是谜面的答案。
73
- 3. 参与者可以询问任何封闭式问题来找寻事件的真相。
74
- 4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
75
- - 若谜面和谜底能找到问题的答案,回答:是或者不是
76
- - 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
77
- - 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
78
- - 若参与者提问基本还原了谜底真相,回答:回答正确
79
- 5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。
80
-
81
- 请严格按照这些规则回答参与者提出的问题。
82
-
83
- **谜面:** 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?
84
-
85
- **谜底:** 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。
86
-
87
- **参与者提出的问题:** 甄加索是自杀吗
88
- <|im_end|>
89
- <|im_start|>assistant
90
-
91
- --------------------------------------------------
92
- text: 死者受伤了吗
93
- --------------------------------------------------
94
- label: 不是
95
- --------------------------------------------------
96
- answer: nan
97
- --------------------------------------------------
98
- title: 甄庄哭声
99
- --------------------------------------------------
100
- puzzle: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。
101
- --------------------------------------------------
102
- truth: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。
103
- --------------------------------------------------
104
- train_text: <|im_start|>system
105
- You are an expert in logical reasoning.<|im_end|>
106
- <|im_start|>user
107
- 你是一个情景猜谜游戏的主持人。游戏规则如下:
108
-
109
- 1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
110
- 2. 主持人知道谜底,谜底是谜面的答案。
111
- 3. 参与者可以询问任何封闭式问题来找寻事件的真相。
112
- 4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
113
- - 若谜面和谜底能找到问题的答案,回答:是或者不是
114
- - 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
115
- - 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
116
- - 若参与者提问基本还原了谜底真相,回答:回答正确
117
- 5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。
118
-
119
- 请严格按照这些规则回答参与者提出的问题。
120
-
121
- **谜面:** 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。
122
-
123
- **谜底:** 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。
124
-
125
- **参与者提出的问题:** 死者受伤了吗
126
- <|im_end|>
127
- <|im_start|>assistant
128
- 不是<|im_end|>
129
- --------------------------------------------------
130
- prompt: <|im_start|>system
131
- You are an expert in logical reasoning.<|im_end|>
132
- <|im_start|>user
133
- 你是一个情景猜谜游戏的主持人。游戏规则如下:
134
-
135
- 1. 参与者会得到一个谜面,谜面会描述一个简单又难以理解的事件。
136
- 2. 主持人知道谜底,谜底是谜面的答案。
137
- 3. 参与者可以询问任何封闭式问题来找寻事件的真相。
138
- 4. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。各回答的判断标准如下:
139
- - 若谜面和谜底能找到问题的答案,回答:是或者不是
140
- - 若谜面和谜底不能直接或者间接推断出问题的答案,回答:不重要
141
- - 若参与者提问不是一个封闭式问题或者问题难以理解,回答:问法错误
142
- - 若参与者提问基本还原了谜底真相,回答:回答正确
143
- 5. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。
144
-
145
- 请严格按照这些规则回答参与者提出的问题。
146
-
147
- **谜面:** 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。
148
-
149
- **谜底:** 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。
150
-
151
- **参与者提出的问题:** 死者受伤了吗
152
- <|im_end|>
153
- <|im_start|>assistant
154
-
155
- Evaluating model: Qwen/Qwen2-72B-Instruct
156
- Batch output: ['不是', '是']
157
- (3) GPU = NVIDIA L40. Max memory = 44.309 GB.
158
- 43.742 GB of memory reserved.
159
- text ... Qwen/Qwen2-72B-Instruct_torch.bfloat16_4bit_lf
160
- 0 甄加索是自杀吗 ... 不是
161
-
162
- [1 rows x 6 columns]
163
- {'accuracy': 0.7486666666666667, 'incorrect_ids': [9, 11, 18, 25, 26, 29, 31, 36, 38, 55, 58, 59, 61, 65, 66, 67, 78, 81, 82, 83, 84, 88, 91, 93, 99, 102, 103, 104, 105, 106, 113, 115, 117, 118, 120, 121, 124, 131, 141, 143, 150, 153, 155, 161, 164, 173, 190, 192, 193, 198, 199, 200, 201, 202, 222, 224, 228, 230, 231, 234, 236, 240, 245, 248, 250, 251, 255, 257, 259, 260, 263, 265, 269, 271, 275, 284, 286, 292, 293, 295, 299, 301, 314, 317, 318, 320, 328, 330, 334, 335, 350, 355, 356, 360, 362, 364, 368, 370, 371, 374, 375, 377, 383, 384, 386, 389, 396, 397, 409, 410, 414, 421, 428, 429, 430, 438, 445, 447, 449, 450, 452, 453, 454, 456, 457, 458, 461, 464, 465, 467, 471, 472, 475, 476, 477, 481, 484, 487, 488, 490, 492, 493, 494, 497, 498, 500, 501, 502, 503, 504, 506, 507, 508, 516, 517, 518, 519, 520, 536, 538, 540, 543, 553, 560, 566, 570, 571, 579, 581, 589, 591, 596, 597, 598, 600, 601, 612, 613, 614, 621, 622, 625, 628, 632, 633, 635, 636, 639, 643, 644, 647, 650, 674, 686, 692, 693, 694, 695, 700, 707, 718, 720, 721, 727, 730, 734, 738, 739, 740, 742, 748, 754, 770, 773, 774, 778, 780, 788, 791, 795, 798, 801, 809, 817, 819, 820, 821, 823, 824, 827, 828, 837, 840, 841, 847, 849, 856, 861, 864, 865, 866, 869, 870, 875, 886, 889, 890, 899, 901, 904, 909, 922, 924, 927, 932, 934, 937, 940, 941, 945, 953, 956, 958, 962, 966, 969, 980, 981, 982, 983, 986, 993, 994, 998, 999, 1003, 1004, 1011, 1012, 1014, 1018, 1019, 1022, 1024, 1028, 1032, 1036, 1043, 1049, 1051, 1055, 1057, 1061, 1066, 1069, 1076, 1077, 1078, 1087, 1089, 1091, 1098, 1101, 1107, 1117, 1120, 1121, 1125, 1126, 1141, 1143, 1158, 1161, 1164, 1166, 1170, 1172, 1174, 1176, 1177, 1178, 1180, 1181, 1183, 1185, 1193, 1196, 1198, 1203, 1209, 1212, 1217, 1220, 1221, 1222, 1228, 1229, 1232, 1236, 1237, 1241, 1242, 1245, 1247, 1251, 1252, 1254, 1257, 1259, 1266, 1273, 1274, 1276, 1278, 1282, 1283, 1289, 1292, 1296, 1298, 1299, 1304, 1305, 1307, 1308, 1311, 1313, 1316, 1317, 1322, 1323, 1324, 1326, 1327, 1331, 1339, 1340, 1342, 1349, 1353, 1358, 1361, 1364, 1373, 1379, 1385, 1387, 1389, 1392, 1393, 1395, 1402, 1404, 1406, 1410, 1412, 1416, 1420, 1422, 1424, 1426, 1427, 1432, 1433, 1436, 1440, 1443, 1444, 1446, 1448, 1449, 1451, 1452, 1453, 1454, 1462, 1464, 1469, 1475, 1476, 1485, 1487, 1490, 1494, 1495, 1496, 1499, 1500, 1506, 1515, 1516, 1517, 1518, 1522, 1524, 1525, 1526, 1547, 1548, 1551, 1554, 1555, 1556, 1558, 1560, 1561, 1562, 1568, 1576, 1577, 1578, 1580, 1581, 1585, 1589, 1590, 1591, 1594, 1596, 1604, 1605, 1613, 1614, 1624, 1628, 1629, 1631, 1633, 1635, 1637, 1639, 1641, 1645, 1647, 1648, 1650, 1651, 1654, 1655, 1658, 1660, 1662, 1665, 1669, 1672, 1673, 1675, 1679, 1686, 1690, 1691, 1695, 1712, 1713, 1716, 1726, 1727, 1741, 1751, 1755, 1756, 1758, 1773, 1785, 1791, 1793, 1800, 1806, 1810, 1811, 1812, 1824, 1827, 1835, 1858, 1863, 1867, 1884, 1888, 1897, 1899, 1907, 1915, 1918, 1919, 1930, 1933, 1945, 1953, 1956, 1958, 1962, 1963, 1964, 1965, 1977, 1978, 1981, 1984, 1989, 1990, 1994, 1995, 2001, 2009, 2017, 2028, 2035, 2036, 2038, 2044, 2046, 2054, 2059, 2064, 2067, 2076, 2077, 2085, 2088, 2105, 2107, 2109, 2114, 2119, 2120, 2121, 2125, 2126, 2133, 2141, 2147, 2157, 2159, 2161, 2162, 2164, 2167, 2174, 2177, 2182, 2183, 2186, 2187, 2188, 2192, 2193, 2194, 2197, 2210, 2212, 2215, 2226, 2229, 2237, 2240, 2244, 2249, 2260, 2262, 2263, 2265, 2274, 2278, 2281, 2287, 2297, 2311, 2312, 2318, 2320, 2322, 2324, 2330, 2333, 2339, 2344, 2348, 2354, 2357, 2360, 2366, 2373, 2381, 2385, 2395, 2396, 2400, 2404, 2406, 2409, 2410, 2414, 
2423, 2425, 2429, 2433, 2442, 2444, 2445, 2446, 2448, 2465, 2471, 2472, 2480, 2486, 2502, 2503, 2511, 2515, 2517, 2520, 2522, 2524, 2529, 2535, 2538, 2542, 2559, 2575, 2581, 2589, 2600, 2604, 2605, 2606, 2607, 2610, 2616, 2617, 2626, 2629, 2634, 2640, 2644, 2645, 2653, 2663, 2664, 2667, 2672, 2676, 2678, 2681, 2682, 2687, 2706, 2708, 2714, 2727, 2731, 2736, 2745, 2749, 2754, 2756, 2757, 2758, 2762, 2766, 2769, 2787, 2788, 2794, 2798, 2801, 2806, 2807, 2810, 2811, 2814, 2815, 2816, 2823, 2824, 2829, 2837, 2842, 2843, 2850, 2851, 2852, 2854, 2861, 2873, 2875, 2877, 2880, 2882, 2884, 2887, 2902, 2905, 2906, 2912, 2913, 2915, 2916, 2919, 2921, 2929, 2931, 2944, 2949, 2953, 2963, 2966, 2969, 2975, 2977, 2979, 2985, 2995]}
 
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct None True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
+ (1) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: None
8
+ 09/09/2024 14:38:58 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
+ 09/09/2024 14:38:58 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
+ 09/09/2024 14:38:58 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
+ 09/09/2024 14:38:58 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
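The `(1) GPU = ... Max memory = ... GB.` / `... GB of memory reserved.` lines that change from NVIDIA L40 to NVIDIA H100 PCIe in these logs are the usual torch.cuda bookkeeping printed before and after model loading. A minimal sketch of how such lines can be produced; the `(1)`/`(2)`/`(3)` numbering of call sites is an assumption, not taken from the repository's code:

```python
import torch

def report_gpu(tag: int) -> None:
    # Device name and total memory, rounded to GB as in the log lines.
    props = torch.cuda.get_device_properties(0)
    total_gb = round(props.total_memory / 1024**3, 3)
    reserved_gb = round(torch.cuda.max_memory_reserved() / 1024**3, 3)
    print(f"({tag}) GPU = {props.name}. Max memory = {total_gb} GB.")
    print(f"{reserved_gb} GB of memory reserved.")

if torch.cuda.is_available():
    report_gpu(1)   # before loading the model
    # ... load model here ...
    report_gpu(2)   # after loading
```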
logs/Qwen2-72B-Instruct_epoch_1.txt CHANGED
@@ -2,17 +2,17 @@ loading env vars from: /common/home/users/d/dh.huang.2023/common2/code/logical-r
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-35 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
- (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-35
8
- 09/08/2024 13:32:17 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
- 09/08/2024 13:32:17 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
- 09/08/2024 13:32:17 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
- 09/08/2024 13:32:17 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
- 09/08/2024 13:44:48 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
- 09/08/2024 13:44:50 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-35
14
- 09/08/2024 13:44:50 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
- (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
@@ -155,3 +155,10 @@ You are an expert in logical reasoning.<|im_end|>
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-35 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
+ (1) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-35
8
+ 09/09/2024 06:51:07 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
+ 09/09/2024 06:51:07 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
+ 09/09/2024 06:51:08 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
+ 09/09/2024 06:51:08 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
+ 09/09/2024 07:01:54 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
+ 09/09/2024 07:01:55 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-35
14
+ 09/09/2024 07:01:55 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
+ (2) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
 
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
158
+ (3) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
159
+ 49.064 GB of memory reserved.
160
+ text ... Qwen/Qwen2-72B-Instruct/checkpoint-35_torch.bfloat16_4bit_lf
161
+ 0 甄加索是自杀吗 ... 不是
162
+
163
+ [1 rows x 7 columns]
164
+ {'accuracy': 0.7583333333333333, 'incorrect_ids': [9, 11, 16, 17, 27, 29, 31, 34, 36, 43, 59, 61, 65, 66, 67, 78, 81, 82, 83, 84, 88, 91, 93, 97, 102, 104, 106, 109, 115, 117, 120, 131, 138, 139, 143, 150, 155, 161, 163, 164, 179, 190, 192, 193, 199, 200, 202, 208, 222, 224, 228, 229, 245, 248, 250, 251, 255, 257, 259, 260, 262, 268, 271, 272, 278, 289, 292, 293, 295, 299, 301, 304, 311, 314, 317, 320, 323, 328, 330, 332, 334, 335, 342, 350, 353, 355, 356, 357, 360, 362, 363, 365, 368, 371, 372, 373, 374, 376, 377, 383, 384, 389, 395, 396, 397, 410, 421, 428, 429, 430, 440, 445, 447, 452, 454, 456, 458, 461, 470, 471, 473, 474, 476, 481, 484, 485, 488, 490, 492, 493, 494, 495, 496, 497, 501, 502, 503, 504, 506, 507, 508, 510, 511, 513, 514, 517, 519, 520, 530, 533, 534, 536, 538, 540, 543, 545, 553, 560, 566, 570, 571, 579, 581, 584, 589, 591, 593, 596, 598, 600, 601, 612, 613, 614, 621, 625, 626, 628, 629, 632, 644, 647, 650, 666, 674, 680, 682, 684, 686, 692, 694, 695, 697, 701, 702, 707, 721, 727, 729, 730, 731, 734, 739, 754, 768, 769, 770, 771, 774, 779, 785, 786, 790, 792, 798, 800, 801, 803, 805, 808, 809, 814, 817, 819, 820, 821, 822, 823, 824, 837, 840, 847, 849, 864, 865, 866, 870, 875, 876, 884, 888, 889, 890, 894, 899, 901, 904, 909, 913, 917, 924, 927, 930, 934, 937, 940, 945, 962, 966, 969, 980, 991, 994, 1004, 1011, 1012, 1014, 1018, 1022, 1025, 1032, 1040, 1043, 1045, 1049, 1051, 1053, 1055, 1056, 1061, 1066, 1067, 1069, 1076, 1077, 1078, 1080, 1087, 1091, 1101, 1117, 1120, 1125, 1126, 1135, 1143, 1158, 1159, 1161, 1166, 1170, 1172, 1177, 1178, 1180, 1181, 1183, 1185, 1198, 1203, 1212, 1217, 1228, 1232, 1236, 1237, 1240, 1241, 1247, 1251, 1252, 1254, 1256, 1259, 1266, 1289, 1296, 1305, 1307, 1308, 1311, 1313, 1314, 1315, 1317, 1323, 1326, 1327, 1331, 1337, 1339, 1342, 1345, 1349, 1353, 1357, 1364, 1367, 1384, 1385, 1387, 1389, 1392, 1393, 1395, 1402, 1406, 1407, 1420, 1422, 1426, 1430, 1440, 1443, 1446, 1449, 1453, 1454, 1457, 1462, 1469, 1476, 1477, 1486, 1487, 1490, 1494, 1495, 1496, 1512, 1516, 1517, 1518, 1525, 1526, 1528, 1547, 1548, 1551, 1558, 1562, 1572, 1573, 1576, 1580, 1581, 1585, 1589, 1590, 1593, 1596, 1602, 1603, 1604, 1605, 1606, 1613, 1622, 1624, 1631, 1633, 1635, 1636, 1637, 1641, 1643, 1645, 1647, 1648, 1650, 1654, 1655, 1658, 1659, 1668, 1669, 1672, 1674, 1675, 1679, 1686, 1690, 1695, 1712, 1716, 1726, 1727, 1751, 1756, 1758, 1768, 1770, 1773, 1780, 1785, 1786, 1787, 1799, 1806, 1810, 1812, 1816, 1820, 1827, 1835, 1836, 1858, 1860, 1867, 1888, 1894, 1897, 1905, 1907, 1914, 1915, 1918, 1930, 1933, 1944, 1945, 1956, 1958, 1962, 1964, 1965, 1975, 1977, 1978, 1981, 1984, 1985, 1989, 1990, 1992, 1995, 2001, 2017, 2035, 2036, 2038, 2044, 2046, 2047, 2059, 2061, 2064, 2067, 2072, 2076, 2077, 2091, 2092, 2100, 2105, 2107, 2109, 2112, 2114, 2116, 2118, 2119, 2121, 2123, 2125, 2126, 2130, 2133, 2135, 2139, 2140, 2141, 2147, 2150, 2159, 2161, 2162, 2164, 2167, 2177, 2183, 2185, 2187, 2188, 2193, 2194, 2195, 2196, 2205, 2210, 2212, 2214, 2215, 2221, 2226, 2229, 2230, 2237, 2240, 2247, 2260, 2261, 2262, 2265, 2274, 2276, 2281, 2287, 2297, 2304, 2311, 2312, 2313, 2318, 2320, 2322, 2324, 2330, 2333, 2344, 2348, 2354, 2357, 2359, 2360, 2364, 2366, 2373, 2385, 2395, 2396, 2400, 2404, 2405, 2406, 2409, 2410, 2423, 2424, 2425, 2429, 2437, 2440, 2441, 2442, 2444, 2448, 2463, 2469, 2471, 2472, 2486, 2491, 2493, 2501, 2508, 2511, 2515, 2517, 2522, 2526, 2529, 2532, 2535, 2538, 2539, 2548, 2549, 2555, 2556, 2559, 2560, 2562, 2575, 2581, 2589, 2595, 2600, 2604, 2605, 2608, 
2610, 2623, 2624, 2629, 2630, 2644, 2660, 2663, 2667, 2671, 2672, 2676, 2678, 2681, 2687, 2710, 2714, 2715, 2727, 2733, 2735, 2736, 2744, 2745, 2749, 2751, 2754, 2757, 2758, 2760, 2764, 2766, 2770, 2786, 2788, 2798, 2803, 2806, 2807, 2811, 2815, 2816, 2818, 2823, 2831, 2837, 2842, 2843, 2844, 2851, 2852, 2853, 2854, 2856, 2857, 2858, 2877, 2880, 2882, 2884, 2888, 2899, 2902, 2905, 2906, 2912, 2913, 2915, 2916, 2917, 2919, 2921, 2931, 2933, 2937, 2944, 2949, 2953, 2965, 2966, 2969, 2973, 2975, 2976, 2977, 2979, 2980, 2981, 2983, 2985, 2990, 2991, 2995, 2999]}
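Each evaluation block ends with a dict of the form `{'accuracy': ..., 'incorrect_ids': [...]}`. A plausible way to derive both from the results CSV named in the logs is to compare the newly added prediction column against the gold `label` column; the column names below are assumptions based on the printed DataFrame preview and dataset features, not the repository's actual code:

```python
import pandas as pd

df = pd.read_csv("results/Qwen2-72B-Instruct_p2.csv")  # path printed in the logs

gold_col = "label"        # assumed gold-answer column
pred_col = df.columns[-1] # e.g. "Qwen/Qwen2-72B-Instruct/checkpoint-35_torch.bfloat16_4bit_lf"

correct = df[gold_col] == df[pred_col]
metrics = {
    "accuracy": correct.mean(),
    "incorrect_ids": df.index[~correct].tolist(),
}
print(metrics["accuracy"], len(metrics["incorrect_ids"]))
```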
logs/Qwen2-72B-Instruct_epoch_2.txt CHANGED
@@ -2,17 +2,17 @@ loading env vars from: /common/home/users/d/dh.huang.2023/common2/code/logical-r
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-70 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
- (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-70
8
- 09/08/2024 13:49:08 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
- 09/08/2024 13:49:08 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
- 09/08/2024 13:49:08 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
- 09/08/2024 13:49:08 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
- 09/08/2024 14:01:17 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
- 09/08/2024 14:01:19 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-70
14
- 09/08/2024 14:01:19 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
- (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
@@ -155,3 +155,10 @@ You are an expert in logical reasoning.<|im_end|>
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-70 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
+ (1) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-70
8
+ 09/09/2024 07:49:51 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
+ 09/09/2024 07:49:51 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
+ 09/09/2024 07:49:52 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
+ 09/09/2024 07:49:52 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
+ 09/09/2024 08:00:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
+ 09/09/2024 08:00:25 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-70
14
+ 09/09/2024 08:00:25 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
+ (2) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
 
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
158
+ (3) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
159
+ 49.064 GB of memory reserved.
160
+ text ... Qwen/Qwen2-72B-Instruct/checkpoint-70_torch.bfloat16_4bit_lf
161
+ 0 甄加索是自杀吗 ... 不是
162
+
163
+ [1 rows x 8 columns]
164
+ {'accuracy': 0.7366666666666667, 'incorrect_ids': [9, 11, 16, 19, 24, 25, 26, 27, 29, 31, 32, 34, 36, 38, 58, 59, 61, 65, 66, 67, 75, 78, 82, 83, 88, 91, 93, 97, 99, 104, 106, 108, 109, 112, 114, 115, 118, 119, 120, 121, 128, 129, 131, 135, 138, 139, 143, 150, 155, 161, 163, 164, 173, 179, 190, 192, 193, 198, 199, 200, 201, 202, 209, 224, 225, 227, 229, 243, 245, 248, 250, 251, 254, 255, 257, 259, 260, 262, 265, 271, 283, 286, 291, 292, 294, 295, 299, 304, 311, 314, 317, 321, 323, 328, 330, 332, 334, 335, 342, 350, 351, 352, 353, 354, 355, 356, 360, 362, 363, 365, 368, 369, 373, 376, 377, 383, 389, 393, 395, 396, 397, 406, 409, 410, 416, 421, 428, 429, 430, 445, 447, 449, 451, 454, 456, 458, 461, 465, 467, 471, 473, 474, 476, 479, 480, 481, 485, 488, 490, 492, 493, 494, 495, 496, 499, 501, 502, 506, 507, 509, 510, 511, 512, 514, 515, 517, 519, 520, 530, 534, 536, 540, 553, 560, 566, 568, 570, 571, 579, 581, 589, 591, 592, 593, 594, 596, 597, 601, 612, 613, 614, 621, 624, 626, 628, 629, 632, 644, 647, 650, 663, 666, 682, 684, 686, 692, 693, 694, 695, 701, 702, 707, 710, 720, 721, 722, 727, 729, 730, 731, 732, 734, 739, 741, 752, 754, 770, 774, 779, 781, 785, 788, 796, 798, 801, 805, 809, 813, 817, 818, 820, 821, 822, 823, 824, 833, 837, 840, 842, 847, 856, 859, 863, 864, 866, 869, 870, 875, 876, 884, 889, 890, 901, 904, 906, 909, 913, 927, 935, 937, 940, 941, 945, 952, 953, 966, 968, 969, 980, 988, 994, 998, 1012, 1014, 1017, 1018, 1019, 1022, 1031, 1032, 1036, 1040, 1043, 1046, 1049, 1051, 1053, 1056, 1057, 1061, 1066, 1069, 1076, 1077, 1080, 1087, 1089, 1107, 1116, 1117, 1120, 1125, 1126, 1129, 1141, 1143, 1158, 1163, 1166, 1170, 1172, 1176, 1177, 1178, 1181, 1183, 1185, 1196, 1202, 1203, 1212, 1216, 1217, 1221, 1228, 1232, 1236, 1237, 1239, 1240, 1241, 1243, 1245, 1251, 1252, 1254, 1256, 1259, 1277, 1282, 1289, 1292, 1300, 1305, 1307, 1308, 1311, 1313, 1315, 1317, 1324, 1326, 1327, 1331, 1335, 1337, 1339, 1342, 1345, 1347, 1349, 1353, 1356, 1362, 1364, 1367, 1368, 1370, 1379, 1380, 1384, 1385, 1386, 1387, 1391, 1392, 1393, 1395, 1402, 1406, 1413, 1416, 1418, 1420, 1422, 1425, 1426, 1427, 1428, 1430, 1431, 1438, 1440, 1444, 1449, 1451, 1452, 1453, 1454, 1456, 1457, 1462, 1468, 1469, 1473, 1475, 1476, 1481, 1485, 1487, 1490, 1494, 1495, 1496, 1512, 1515, 1516, 1517, 1518, 1525, 1526, 1533, 1540, 1547, 1548, 1551, 1554, 1558, 1560, 1562, 1565, 1572, 1576, 1585, 1586, 1590, 1591, 1593, 1594, 1604, 1605, 1606, 1613, 1614, 1622, 1624, 1627, 1633, 1636, 1637, 1641, 1645, 1647, 1648, 1650, 1654, 1655, 1658, 1659, 1660, 1668, 1669, 1672, 1673, 1674, 1679, 1686, 1690, 1691, 1695, 1712, 1713, 1716, 1718, 1726, 1727, 1751, 1756, 1758, 1768, 1770, 1780, 1785, 1786, 1793, 1796, 1797, 1799, 1806, 1812, 1816, 1820, 1827, 1835, 1836, 1841, 1845, 1851, 1858, 1860, 1867, 1869, 1872, 1907, 1914, 1915, 1918, 1929, 1933, 1944, 1945, 1958, 1962, 1964, 1965, 1978, 1984, 1989, 1990, 1992, 1995, 1998, 2001, 2014, 2015, 2017, 2020, 2025, 2029, 2035, 2036, 2038, 2046, 2053, 2054, 2059, 2062, 2064, 2072, 2076, 2077, 2091, 2092, 2102, 2105, 2107, 2109, 2112, 2114, 2118, 2119, 2121, 2126, 2128, 2133, 2135, 2140, 2141, 2144, 2145, 2147, 2150, 2161, 2162, 2164, 2167, 2177, 2180, 2183, 2185, 2187, 2188, 2189, 2193, 2194, 2195, 2196, 2205, 2210, 2212, 2214, 2226, 2229, 2230, 2234, 2237, 2240, 2247, 2250, 2261, 2262, 2265, 2274, 2280, 2287, 2293, 2297, 2301, 2312, 2313, 2318, 2320, 2322, 2324, 2326, 2330, 2333, 2339, 2348, 2359, 2362, 2364, 2366, 2369, 2373, 2385, 2388, 2395, 2400, 2404, 2406, 2409, 2410, 2423, 2424, 
2425, 2429, 2433, 2435, 2437, 2440, 2441, 2442, 2445, 2463, 2469, 2471, 2475, 2477, 2484, 2486, 2488, 2491, 2502, 2506, 2508, 2515, 2517, 2520, 2522, 2529, 2530, 2532, 2534, 2535, 2537, 2538, 2545, 2548, 2549, 2554, 2556, 2557, 2559, 2560, 2562, 2563, 2565, 2575, 2581, 2588, 2589, 2593, 2600, 2604, 2616, 2617, 2624, 2629, 2630, 2632, 2653, 2655, 2660, 2661, 2663, 2667, 2670, 2672, 2676, 2678, 2681, 2682, 2686, 2687, 2704, 2707, 2710, 2714, 2727, 2735, 2736, 2744, 2745, 2749, 2751, 2754, 2756, 2757, 2760, 2762, 2764, 2766, 2769, 2772, 2781, 2786, 2788, 2797, 2798, 2801, 2803, 2806, 2807, 2811, 2812, 2814, 2815, 2816, 2823, 2824, 2837, 2843, 2844, 2845, 2851, 2852, 2854, 2856, 2857, 2858, 2861, 2877, 2880, 2882, 2884, 2887, 2888, 2891, 2896, 2899, 2902, 2905, 2906, 2912, 2913, 2915, 2916, 2919, 2921, 2931, 2933, 2937, 2949, 2950, 2953, 2966, 2969, 2973, 2975, 2976, 2977, 2979, 2980, 2981, 2983, 2985, 2988, 2991, 2995]}
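The epoch_N logs load the same base model with a different LoRA checkpoint each time (`checkpoint-35`, `-70`, `-105`, ...), 4-bit quantized with bitsandbytes. A minimal transformers + peft sketch of that setup; it mirrors the log messages rather than the repository's `llm_toolkit` code, and the configuration details are assumptions:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

model_name = "Qwen/Qwen2-72B-Instruct"
adapter_path = "llama-factory/saves/Qwen2-72B-Instruct/checkpoint-70"  # one checkpoint per epoch

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # "Quantizing model to 4 bit with bitsandbytes"
    bnb_4bit_compute_dtype=torch.bfloat16,  # matches the "torch.bfloat16_4bit" column suffix
)

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, quantization_config=bnb_config, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_path)  # "Loaded adapter(s): ..."
model.eval()
```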
logs/Qwen2-72B-Instruct_epoch_3.txt CHANGED
@@ -2,17 +2,17 @@ loading env vars from: /common/home/users/d/dh.huang.2023/common2/code/logical-r
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-105 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
- (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-105
8
- 09/08/2024 14:05:43 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
- 09/08/2024 14:05:43 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
- 09/08/2024 14:05:43 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
- 09/08/2024 14:05:43 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
- 09/08/2024 14:17:06 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
- 09/08/2024 14:17:08 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-105
14
- 09/08/2024 14:17:08 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
- (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
@@ -155,3 +155,10 @@ You are an expert in logical reasoning.<|im_end|>
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-105 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
+ (1) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-105
8
+ 09/09/2024 08:49:36 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
+ 09/09/2024 08:49:36 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
+ 09/09/2024 08:49:36 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
+ 09/09/2024 08:49:36 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
+ 09/09/2024 09:00:16 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
+ 09/09/2024 09:00:18 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-105
14
+ 09/09/2024 09:00:18 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
+ (2) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
 
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
158
+ (3) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
159
+ 49.064 GB of memory reserved.
160
+ text ... Qwen/Qwen2-72B-Instruct/checkpoint-105_torch.bfloat16_4bit_lf
161
+ 0 甄加索是自杀吗 ... 不是
162
+
163
+ [1 rows x 9 columns]
164
+ {'accuracy': 0.757, 'incorrect_ids': [6, 9, 11, 16, 29, 31, 34, 35, 36, 59, 60, 61, 65, 66, 67, 78, 83, 91, 93, 94, 97, 104, 106, 108, 112, 114, 115, 117, 118, 119, 120, 124, 128, 129, 131, 135, 139, 143, 150, 155, 160, 161, 163, 164, 179, 190, 199, 200, 202, 218, 224, 225, 229, 234, 235, 241, 243, 245, 248, 250, 254, 255, 259, 260, 261, 269, 271, 289, 292, 293, 295, 299, 304, 314, 321, 323, 328, 330, 334, 335, 342, 350, 355, 356, 360, 362, 365, 368, 369, 370, 372, 373, 377, 383, 389, 393, 395, 396, 397, 410, 414, 423, 428, 429, 430, 445, 447, 449, 452, 454, 456, 458, 461, 471, 473, 474, 476, 477, 481, 482, 483, 485, 486, 490, 492, 493, 494, 495, 499, 500, 501, 502, 506, 507, 508, 510, 511, 514, 517, 519, 520, 528, 533, 534, 536, 538, 540, 560, 561, 564, 566, 570, 571, 573, 579, 581, 589, 591, 592, 596, 597, 598, 600, 601, 610, 613, 614, 621, 622, 625, 628, 629, 632, 635, 643, 644, 647, 649, 650, 663, 666, 667, 670, 681, 682, 684, 686, 687, 692, 693, 694, 695, 702, 705, 707, 716, 720, 721, 722, 727, 729, 730, 732, 734, 739, 740, 752, 754, 770, 774, 778, 779, 780, 781, 785, 788, 789, 796, 798, 801, 805, 809, 810, 817, 819, 820, 821, 822, 823, 824, 827, 828, 837, 840, 841, 842, 847, 866, 870, 873, 876, 884, 886, 889, 890, 894, 899, 901, 904, 906, 909, 912, 913, 924, 927, 930, 935, 937, 940, 945, 958, 962, 966, 969, 973, 980, 991, 994, 998, 1004, 1006, 1007, 1011, 1012, 1014, 1019, 1024, 1031, 1032, 1036, 1040, 1043, 1045, 1049, 1051, 1053, 1057, 1061, 1069, 1071, 1080, 1087, 1097, 1107, 1111, 1116, 1120, 1125, 1126, 1139, 1158, 1166, 1167, 1172, 1177, 1178, 1180, 1181, 1183, 1185, 1193, 1203, 1209, 1212, 1228, 1232, 1236, 1239, 1240, 1241, 1242, 1245, 1247, 1251, 1252, 1254, 1255, 1256, 1259, 1266, 1282, 1289, 1296, 1300, 1305, 1308, 1311, 1313, 1314, 1315, 1317, 1326, 1327, 1331, 1335, 1339, 1342, 1345, 1349, 1353, 1356, 1363, 1380, 1385, 1386, 1387, 1391, 1392, 1402, 1406, 1407, 1410, 1412, 1418, 1420, 1422, 1426, 1430, 1440, 1444, 1447, 1453, 1454, 1457, 1458, 1462, 1469, 1473, 1476, 1481, 1485, 1487, 1490, 1494, 1496, 1512, 1515, 1517, 1518, 1522, 1525, 1526, 1528, 1533, 1547, 1548, 1551, 1554, 1561, 1562, 1568, 1572, 1576, 1580, 1581, 1585, 1586, 1587, 1590, 1593, 1602, 1603, 1605, 1606, 1613, 1622, 1627, 1633, 1635, 1636, 1637, 1639, 1641, 1647, 1648, 1650, 1654, 1655, 1658, 1659, 1668, 1669, 1672, 1673, 1674, 1679, 1683, 1686, 1690, 1695, 1700, 1701, 1713, 1716, 1726, 1727, 1728, 1751, 1755, 1756, 1768, 1770, 1780, 1785, 1786, 1787, 1796, 1797, 1812, 1816, 1820, 1825, 1827, 1835, 1836, 1837, 1841, 1848, 1851, 1858, 1860, 1869, 1897, 1907, 1914, 1924, 1930, 1933, 1934, 1943, 1958, 1964, 1965, 1978, 1981, 1982, 1984, 1990, 1992, 1995, 1996, 2001, 2003, 2014, 2017, 2022, 2025, 2035, 2036, 2038, 2046, 2059, 2061, 2064, 2072, 2077, 2091, 2092, 2094, 2100, 2102, 2105, 2107, 2109, 2112, 2114, 2118, 2119, 2121, 2125, 2126, 2130, 2133, 2135, 2139, 2140, 2141, 2145, 2147, 2161, 2162, 2164, 2167, 2177, 2180, 2181, 2183, 2185, 2186, 2187, 2189, 2193, 2195, 2196, 2209, 2210, 2212, 2229, 2230, 2237, 2240, 2244, 2255, 2261, 2262, 2264, 2265, 2274, 2280, 2287, 2293, 2297, 2301, 2311, 2312, 2313, 2318, 2320, 2322, 2324, 2330, 2333, 2339, 2340, 2345, 2348, 2354, 2359, 2360, 2364, 2366, 2369, 2373, 2388, 2395, 2396, 2400, 2405, 2406, 2409, 2410, 2423, 2424, 2425, 2429, 2437, 2440, 2441, 2442, 2445, 2448, 2463, 2469, 2471, 2484, 2486, 2501, 2508, 2511, 2515, 2517, 2520, 2522, 2524, 2526, 2529, 2530, 2532, 2534, 2535, 2538, 2539, 2542, 2547, 2548, 2549, 2556, 2559, 2560, 2562, 2563, 2566, 2575, 2581, 
2589, 2593, 2600, 2604, 2607, 2610, 2616, 2617, 2624, 2629, 2630, 2632, 2639, 2653, 2660, 2663, 2672, 2676, 2714, 2727, 2731, 2735, 2736, 2744, 2745, 2749, 2751, 2754, 2756, 2757, 2758, 2760, 2762, 2764, 2766, 2770, 2788, 2798, 2803, 2806, 2807, 2811, 2812, 2814, 2815, 2816, 2823, 2824, 2837, 2843, 2844, 2845, 2852, 2857, 2858, 2861, 2877, 2880, 2882, 2884, 2888, 2891, 2899, 2902, 2905, 2906, 2908, 2912, 2913, 2915, 2916, 2921, 2926, 2931, 2933, 2944, 2949, 2950, 2953, 2962, 2965, 2966, 2969, 2975, 2976, 2977, 2980, 2981, 2983, 2985, 2988, 2990, 2991, 2995, 2999]}
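The prompts quoted in these logs follow Qwen2's ChatML template (`<|im_start|>system ... <|im_end|>`). A short sketch of building one with the tokenizer's chat template; the system/user strings are abbreviated placeholders for the full game-rules prompt shown in the logs:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-72B-Instruct")

messages = [
    {"role": "system", "content": "You are an expert in logical reasoning."},
    {"role": "user", "content": "你是一个情景猜谜游戏的主持人。游戏规则如下:..."},  # full rules + 谜面/谜底/问题
]

# tokenize=False returns the rendered <|im_start|>...<|im_end|> string;
# add_generation_prompt=True appends the trailing "<|im_start|>assistant\n".
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```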
logs/Qwen2-72B-Instruct_epoch_4.txt CHANGED
@@ -2,17 +2,17 @@ loading env vars from: /common/home/users/d/dh.huang.2023/common2/code/logical-r
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-140 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
- (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-140
8
- 09/08/2024 14:21:26 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
- 09/08/2024 14:21:26 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
- 09/08/2024 14:21:26 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
- 09/08/2024 14:21:26 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
- 09/08/2024 14:32:05 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
- 09/08/2024 14:32:07 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-140
14
- 09/08/2024 14:32:07 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
- (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
@@ -155,3 +155,10 @@ You are an expert in logical reasoning.<|im_end|>
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-140 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
+ (1) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-140
8
+ 09/09/2024 09:48:57 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
+ 09/09/2024 09:48:57 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
+ 09/09/2024 09:48:58 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
+ 09/09/2024 09:48:58 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
+ 09/09/2024 09:59:34 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
+ 09/09/2024 09:59:36 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-140
14
+ 09/09/2024 09:59:36 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
+ (2) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
 
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
158
+ (3) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
159
+ 49.064 GB of memory reserved.
160
+ text ... Qwen/Qwen2-72B-Instruct/checkpoint-140_torch.bfloat16_4bit_lf
161
+ 0 甄加索是自杀吗 ... 不是
162
+
163
+ [1 rows x 10 columns]
164
+ {'accuracy': 0.7893333333333333, 'incorrect_ids': [6, 10, 11, 18, 24, 27, 29, 31, 34, 36, 52, 55, 58, 59, 61, 65, 66, 67, 78, 81, 82, 83, 88, 93, 97, 104, 108, 114, 115, 117, 119, 120, 128, 129, 131, 137, 138, 143, 150, 155, 161, 163, 164, 179, 190, 199, 201, 224, 229, 235, 236, 245, 248, 250, 254, 255, 257, 259, 260, 271, 283, 286, 289, 292, 299, 304, 314, 317, 323, 326, 330, 332, 334, 335, 342, 350, 355, 356, 360, 363, 365, 368, 371, 372, 373, 376, 377, 383, 389, 395, 396, 397, 402, 403, 410, 423, 428, 429, 430, 438, 445, 447, 452, 454, 456, 457, 458, 461, 465, 467, 473, 474, 476, 480, 483, 485, 488, 492, 493, 494, 495, 496, 499, 500, 501, 502, 503, 506, 507, 508, 510, 511, 517, 519, 520, 534, 536, 538, 540, 543, 545, 560, 566, 570, 571, 579, 581, 584, 589, 591, 592, 597, 598, 600, 601, 612, 613, 614, 615, 621, 625, 628, 629, 632, 643, 644, 647, 650, 663, 666, 682, 684, 692, 694, 695, 702, 707, 711, 716, 718, 720, 721, 727, 729, 730, 732, 734, 739, 752, 754, 770, 771, 774, 779, 788, 795, 801, 805, 809, 813, 817, 819, 820, 821, 823, 824, 827, 837, 840, 841, 842, 847, 861, 866, 869, 870, 875, 876, 884, 886, 889, 890, 901, 904, 909, 912, 913, 927, 932, 935, 937, 945, 952, 962, 966, 969, 980, 989, 991, 994, 998, 1007, 1011, 1012, 1014, 1017, 1018, 1019, 1022, 1032, 1036, 1040, 1043, 1045, 1049, 1051, 1053, 1069, 1075, 1080, 1087, 1097, 1120, 1125, 1126, 1129, 1138, 1158, 1163, 1166, 1167, 1172, 1174, 1178, 1180, 1181, 1183, 1185, 1193, 1198, 1212, 1221, 1228, 1232, 1236, 1239, 1240, 1241, 1251, 1252, 1254, 1259, 1266, 1289, 1305, 1311, 1313, 1314, 1317, 1323, 1324, 1326, 1327, 1331, 1337, 1339, 1342, 1345, 1349, 1353, 1357, 1363, 1367, 1370, 1380, 1386, 1387, 1389, 1392, 1393, 1406, 1407, 1420, 1422, 1426, 1428, 1431, 1440, 1444, 1451, 1453, 1454, 1455, 1462, 1468, 1469, 1473, 1475, 1476, 1481, 1490, 1494, 1496, 1512, 1515, 1517, 1518, 1525, 1526, 1528, 1533, 1547, 1548, 1554, 1558, 1561, 1562, 1572, 1580, 1581, 1585, 1586, 1590, 1593, 1596, 1602, 1603, 1604, 1605, 1606, 1613, 1622, 1624, 1627, 1633, 1636, 1641, 1643, 1645, 1647, 1648, 1650, 1654, 1655, 1658, 1659, 1665, 1668, 1672, 1674, 1679, 1686, 1695, 1712, 1716, 1726, 1727, 1751, 1755, 1756, 1770, 1773, 1780, 1786, 1796, 1799, 1812, 1816, 1824, 1827, 1835, 1836, 1848, 1858, 1860, 1869, 1872, 1905, 1914, 1933, 1944, 1953, 1958, 1964, 1978, 1981, 1982, 1984, 1989, 1990, 1992, 1995, 2014, 2017, 2035, 2064, 2067, 2072, 2076, 2077, 2085, 2094, 2100, 2105, 2107, 2109, 2112, 2114, 2118, 2119, 2121, 2126, 2130, 2133, 2135, 2140, 2141, 2145, 2147, 2161, 2162, 2164, 2167, 2174, 2177, 2183, 2185, 2186, 2188, 2189, 2193, 2194, 2195, 2210, 2212, 2214, 2229, 2230, 2234, 2237, 2240, 2244, 2246, 2249, 2261, 2262, 2265, 2274, 2281, 2293, 2297, 2301, 2304, 2313, 2318, 2320, 2322, 2324, 2330, 2333, 2339, 2340, 2348, 2359, 2360, 2364, 2369, 2373, 2381, 2388, 2395, 2396, 2400, 2404, 2406, 2409, 2410, 2422, 2423, 2425, 2429, 2437, 2440, 2441, 2442, 2469, 2471, 2475, 2486, 2488, 2503, 2515, 2517, 2522, 2524, 2526, 2529, 2532, 2535, 2539, 2549, 2555, 2556, 2557, 2559, 2560, 2574, 2575, 2589, 2595, 2600, 2604, 2605, 2616, 2624, 2626, 2629, 2632, 2644, 2655, 2663, 2667, 2671, 2676, 2678, 2704, 2707, 2714, 2727, 2731, 2736, 2745, 2746, 2749, 2756, 2757, 2758, 2764, 2766, 2767, 2770, 2781, 2788, 2795, 2798, 2801, 2803, 2806, 2807, 2811, 2815, 2816, 2823, 2837, 2843, 2844, 2852, 2856, 2857, 2858, 2875, 2877, 2880, 2882, 2884, 2888, 2899, 2902, 2905, 2906, 2912, 2913, 2915, 2916, 2921, 2931, 2933, 2938, 2944, 2949, 2953, 2965, 2966, 2969, 2973, 2975, 2976, 2977, 
2979, 2983, 2990, 2995]}
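Each epoch's run prints `[1 rows x N columns]` with N growing by one, which suggests every evaluation appends its predictions as a new column, named after the checkpoint and dtype, to the shared results CSV. A hedged sketch of that pattern; the function and column names are illustrative, not taken from the repository:

```python
import pandas as pd

def append_predictions(csv_path: str, column_name: str, predictions: list[str]) -> pd.DataFrame:
    """Add one prediction column per evaluated checkpoint to the shared results file."""
    df = pd.read_csv(csv_path)
    df[column_name] = predictions  # e.g. "Qwen/Qwen2-72B-Instruct/checkpoint-140_torch.bfloat16_4bit_lf"
    df.to_csv(csv_path, index=False)
    return df

# Illustrative usage only:
# append_predictions("results/Qwen2-72B-Instruct_p2.csv",
#                    "Qwen/Qwen2-72B-Instruct/checkpoint-140_torch.bfloat16_4bit_lf",
#                    predictions)
```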
logs/Qwen2-72B-Instruct_epoch_5.txt CHANGED
@@ -2,17 +2,17 @@ loading env vars from: /common/home/users/d/dh.huang.2023/common2/code/logical-r
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-175 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
- (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-175
8
- 09/08/2024 14:36:36 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
- 09/08/2024 14:36:36 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
- 09/08/2024 14:36:36 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
- 09/08/2024 14:36:36 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
- 09/08/2024 14:47:26 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
- 09/08/2024 14:47:28 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-175
14
- 09/08/2024 14:47:28 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
- (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
@@ -155,3 +155,10 @@ You are an expert in logical reasoning.<|im_end|>
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-175 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
+ (1) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-175
8
+ 09/09/2024 10:46:33 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
+ 09/09/2024 10:46:33 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
+ 09/09/2024 10:46:33 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
+ 09/09/2024 10:46:33 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
+ 09/09/2024 10:57:14 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
+ 09/09/2024 10:57:15 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-175
14
+ 09/09/2024 10:57:15 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
+ (2) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
 
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
158
+ (3) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
159
+ 49.064 GB of memory reserved.
160
+ text ... Qwen/Qwen2-72B-Instruct/checkpoint-175_torch.bfloat16_4bit_lf
161
+ 0 甄加索是自杀吗 ... 不是
162
+
163
+ [1 rows x 11 columns]
164
+ {'accuracy': 0.7376666666666667, 'incorrect_ids': [8, 11, 16, 24, 27, 28, 29, 31, 32, 34, 36, 37, 38, 59, 60, 61, 65, 66, 67, 75, 78, 81, 82, 83, 88, 93, 94, 97, 102, 104, 106, 108, 110, 114, 115, 119, 120, 122, 124, 128, 129, 131, 139, 143, 150, 155, 156, 161, 163, 164, 179, 190, 192, 199, 200, 201, 222, 224, 225, 227, 229, 231, 235, 243, 245, 250, 251, 253, 255, 257, 259, 260, 261, 271, 275, 278, 282, 286, 287, 291, 294, 299, 304, 311, 314, 321, 323, 326, 328, 330, 332, 334, 335, 342, 350, 351, 353, 354, 355, 356, 358, 359, 360, 362, 365, 368, 370, 371, 372, 373, 374, 376, 377, 383, 386, 389, 391, 395, 396, 397, 399, 402, 408, 410, 413, 417, 428, 429, 430, 440, 445, 447, 449, 452, 454, 456, 458, 461, 472, 473, 474, 476, 480, 483, 486, 490, 492, 493, 494, 495, 499, 501, 502, 506, 507, 508, 509, 510, 511, 514, 517, 519, 520, 534, 536, 540, 560, 564, 570, 571, 579, 581, 589, 591, 593, 596, 598, 600, 601, 612, 613, 614, 621, 625, 628, 629, 632, 636, 643, 644, 647, 649, 662, 663, 665, 666, 667, 668, 670, 681, 682, 684, 686, 692, 695, 701, 702, 705, 707, 711, 716, 720, 721, 722, 727, 729, 730, 731, 732, 734, 735, 739, 743, 754, 770, 771, 774, 778, 781, 783, 786, 788, 789, 790, 791, 792, 795, 797, 798, 799, 801, 803, 805, 808, 813, 817, 818, 820, 821, 823, 824, 837, 840, 847, 849, 857, 864, 866, 868, 869, 870, 876, 884, 888, 889, 890, 894, 899, 901, 904, 906, 909, 912, 913, 917, 927, 930, 935, 937, 940, 945, 952, 962, 969, 970, 971, 980, 991, 993, 994, 998, 1006, 1007, 1012, 1014, 1018, 1020, 1031, 1032, 1036, 1040, 1043, 1045, 1049, 1051, 1053, 1061, 1066, 1067, 1069, 1071, 1075, 1076, 1080, 1087, 1111, 1112, 1116, 1120, 1125, 1126, 1135, 1138, 1139, 1149, 1153, 1158, 1161, 1166, 1172, 1174, 1176, 1178, 1180, 1181, 1185, 1198, 1202, 1203, 1209, 1212, 1216, 1221, 1228, 1232, 1236, 1239, 1240, 1241, 1245, 1251, 1252, 1254, 1255, 1256, 1259, 1289, 1292, 1297, 1305, 1308, 1311, 1313, 1315, 1317, 1321, 1324, 1331, 1333, 1335, 1342, 1345, 1347, 1349, 1357, 1362, 1363, 1370, 1380, 1384, 1386, 1387, 1389, 1391, 1392, 1393, 1402, 1406, 1413, 1418, 1420, 1422, 1426, 1428, 1431, 1437, 1440, 1444, 1445, 1448, 1450, 1453, 1454, 1456, 1457, 1462, 1468, 1469, 1470, 1473, 1476, 1485, 1490, 1494, 1495, 1496, 1512, 1517, 1518, 1522, 1525, 1526, 1533, 1547, 1548, 1554, 1556, 1558, 1560, 1562, 1581, 1585, 1586, 1590, 1593, 1594, 1596, 1602, 1603, 1604, 1605, 1606, 1613, 1622, 1631, 1635, 1636, 1641, 1645, 1647, 1648, 1650, 1654, 1655, 1658, 1659, 1668, 1672, 1673, 1674, 1679, 1686, 1690, 1695, 1704, 1712, 1716, 1726, 1727, 1740, 1751, 1755, 1756, 1758, 1761, 1768, 1770, 1773, 1780, 1786, 1787, 1795, 1796, 1798, 1799, 1810, 1812, 1816, 1827, 1835, 1836, 1841, 1845, 1847, 1848, 1851, 1852, 1854, 1858, 1860, 1867, 1869, 1872, 1907, 1917, 1925, 1933, 1934, 1943, 1944, 1950, 1953, 1958, 1964, 1965, 1967, 1978, 1981, 1984, 1990, 1992, 1994, 1995, 2001, 2014, 2015, 2017, 2030, 2035, 2044, 2046, 2061, 2062, 2064, 2072, 2076, 2077, 2091, 2092, 2094, 2095, 2100, 2102, 2105, 2107, 2109, 2112, 2114, 2118, 2119, 2121, 2123, 2126, 2128, 2129, 2130, 2131, 2133, 2135, 2141, 2144, 2145, 2147, 2159, 2161, 2162, 2164, 2167, 2177, 2179, 2180, 2183, 2185, 2186, 2188, 2193, 2194, 2195, 2197, 2205, 2210, 2212, 2215, 2217, 2221, 2223, 2226, 2229, 2230, 2240, 2244, 2246, 2249, 2261, 2262, 2265, 2274, 2280, 2281, 2285, 2287, 2290, 2293, 2297, 2301, 2304, 2311, 2312, 2313, 2318, 2319, 2320, 2322, 2324, 2330, 2333, 2339, 2340, 2343, 2345, 2348, 2360, 2364, 2366, 2369, 2373, 2388, 2395, 2399, 2400, 2406, 2409, 2410, 2423, 2424, 2425, 2429, 
2437, 2440, 2441, 2442, 2445, 2446, 2448, 2471, 2472, 2474, 2477, 2484, 2486, 2488, 2491, 2501, 2502, 2508, 2511, 2515, 2516, 2517, 2518, 2522, 2526, 2529, 2530, 2532, 2534, 2535, 2538, 2547, 2548, 2549, 2554, 2555, 2556, 2557, 2559, 2560, 2562, 2569, 2574, 2575, 2589, 2600, 2604, 2610, 2617, 2624, 2629, 2630, 2632, 2639, 2652, 2655, 2660, 2661, 2663, 2664, 2667, 2672, 2676, 2678, 2697, 2699, 2704, 2710, 2714, 2727, 2731, 2735, 2736, 2744, 2746, 2747, 2749, 2751, 2754, 2756, 2757, 2760, 2762, 2764, 2766, 2767, 2770, 2787, 2788, 2797, 2798, 2803, 2806, 2807, 2814, 2815, 2816, 2818, 2823, 2824, 2837, 2842, 2843, 2844, 2852, 2856, 2857, 2861, 2867, 2875, 2876, 2877, 2880, 2882, 2884, 2888, 2890, 2899, 2902, 2905, 2906, 2912, 2913, 2915, 2916, 2919, 2921, 2931, 2933, 2937, 2944, 2949, 2953, 2955, 2962, 2963, 2966, 2972, 2973, 2975, 2976, 2977, 2980, 2983, 2985, 2988, 2991, 2992, 2995, 2999]}
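Each run in these logs loads the Qwen2-72B-Instruct base model quantized to 4 bit with bitsandbytes and then attaches the fine-tuned adapter checkpoint, which matches the roughly 43 GB of reserved memory reported after loading. The sketch below shows an equivalent load using transformers and peft, purely as an illustration of the logged steps; LLaMA-Factory's own loader is what actually runs here, and the NF4 quantization type is an assumption (the log only says "Quantizing model to 4 bit with bitsandbytes").

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_model = "Qwen/Qwen2-72B-Instruct"
adapter_path = "llama-factory/saves/Qwen2-72B-Instruct/checkpoint-175"

# 4-bit quantization with bfloat16 compute; NF4 is assumed, not stated in the log.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(base_model)
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
# Attach the LoRA adapter produced by fine-tuning.
model = PeftModel.from_pretrained(model, adapter_path)
model.eval()
```

With device_map="auto", accelerate shards the quantized weights across the available GPU(s), which is why the same run fits on either the 44 GB L40 or the 79 GB H100 noted in the logs.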
logs/Qwen2-72B-Instruct_epoch_6.txt CHANGED
@@ -2,17 +2,17 @@ loading env vars from: /common/home/users/d/dh.huang.2023/common2/code/logical-r
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-210 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
- (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-210
8
- 09/08/2024 14:52:00 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
- 09/08/2024 14:52:00 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
- 09/08/2024 14:52:00 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
- 09/08/2024 14:52:00 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
- 09/08/2024 15:02:41 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
- 09/08/2024 15:02:43 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-210
14
- 09/08/2024 15:02:43 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
- (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
@@ -155,3 +155,10 @@ You are an expert in logical reasoning.<|im_end|>
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
 
 
 
 
 
 
 
 
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-210 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
+ (1) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-210
8
+ 09/09/2024 11:46:49 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
+ 09/09/2024 11:46:49 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
+ 09/09/2024 11:46:49 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
+ 09/09/2024 11:46:49 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
+ 09/09/2024 11:57:22 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
+ 09/09/2024 11:57:23 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-210
14
+ 09/09/2024 11:57:23 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
+ (2) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
 
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
  Batch output: ['不是', '是']
158
+ (3) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
159
+ 49.064 GB of memory reserved.
160
+ text ... Qwen/Qwen2-72B-Instruct/checkpoint-210_torch.bfloat16_4bit_lf
161
+ 0 甄加索是自杀吗 ... 不是
162
+
163
+ [1 rows x 12 columns]
164
+ {'accuracy': 0.763, 'incorrect_ids': [11, 16, 27, 29, 31, 33, 34, 35, 36, 58, 59, 61, 65, 66, 67, 78, 81, 83, 88, 93, 94, 104, 108, 109, 112, 115, 117, 118, 124, 128, 129, 131, 138, 139, 143, 150, 155, 161, 163, 164, 172, 179, 190, 192, 193, 199, 200, 224, 225, 229, 234, 238, 240, 243, 245, 248, 250, 251, 255, 259, 260, 261, 262, 271, 286, 292, 295, 299, 304, 311, 314, 316, 317, 321, 322, 323, 326, 328, 330, 332, 334, 335, 347, 350, 353, 354, 355, 356, 357, 360, 362, 363, 365, 368, 370, 371, 372, 373, 377, 389, 395, 396, 397, 402, 409, 410, 414, 423, 428, 429, 430, 442, 445, 447, 452, 454, 455, 456, 458, 461, 473, 476, 480, 481, 488, 492, 493, 495, 496, 498, 501, 502, 506, 507, 508, 510, 511, 514, 517, 519, 520, 534, 536, 540, 545, 553, 560, 568, 570, 571, 579, 581, 584, 589, 591, 593, 594, 597, 598, 600, 601, 603, 610, 613, 614, 615, 621, 622, 625, 626, 628, 629, 632, 636, 643, 644, 647, 663, 666, 668, 682, 684, 686, 692, 695, 701, 702, 707, 716, 718, 720, 721, 722, 727, 729, 730, 731, 732, 734, 739, 740, 754, 768, 770, 774, 778, 779, 785, 788, 789, 797, 798, 801, 805, 808, 809, 813, 817, 818, 819, 820, 821, 823, 824, 837, 840, 841, 842, 847, 849, 861, 866, 869, 870, 873, 876, 884, 885, 886, 888, 889, 890, 901, 904, 909, 913, 927, 929, 930, 932, 935, 937, 945, 952, 958, 962, 966, 968, 969, 980, 986, 989, 991, 994, 998, 1006, 1012, 1014, 1017, 1018, 1019, 1025, 1031, 1032, 1036, 1038, 1040, 1043, 1045, 1046, 1049, 1051, 1053, 1069, 1076, 1077, 1078, 1080, 1087, 1091, 1107, 1111, 1116, 1117, 1120, 1121, 1125, 1126, 1129, 1135, 1138, 1139, 1143, 1150, 1158, 1163, 1166, 1167, 1172, 1174, 1177, 1178, 1180, 1181, 1185, 1203, 1209, 1212, 1221, 1228, 1232, 1236, 1239, 1240, 1241, 1242, 1246, 1251, 1252, 1254, 1256, 1259, 1282, 1289, 1305, 1308, 1311, 1314, 1315, 1317, 1324, 1326, 1339, 1342, 1349, 1353, 1357, 1363, 1364, 1367, 1370, 1380, 1384, 1385, 1386, 1387, 1389, 1392, 1393, 1395, 1402, 1406, 1416, 1418, 1420, 1422, 1425, 1426, 1428, 1430, 1440, 1444, 1451, 1452, 1453, 1454, 1455, 1462, 1469, 1476, 1481, 1490, 1494, 1495, 1496, 1512, 1516, 1517, 1518, 1525, 1526, 1528, 1544, 1547, 1548, 1556, 1558, 1560, 1561, 1562, 1565, 1572, 1580, 1585, 1590, 1593, 1594, 1596, 1603, 1604, 1605, 1606, 1616, 1620, 1622, 1627, 1631, 1636, 1637, 1641, 1643, 1648, 1650, 1654, 1655, 1658, 1659, 1665, 1668, 1669, 1672, 1674, 1679, 1686, 1695, 1700, 1701, 1704, 1712, 1716, 1718, 1726, 1727, 1751, 1755, 1756, 1768, 1770, 1785, 1786, 1795, 1796, 1797, 1812, 1816, 1827, 1835, 1836, 1837, 1841, 1851, 1852, 1854, 1858, 1860, 1869, 1897, 1907, 1909, 1914, 1915, 1917, 1930, 1933, 1943, 1949, 1950, 1958, 1964, 1978, 1981, 1984, 1990, 1992, 1995, 2001, 2002, 2014, 2015, 2017, 2028, 2035, 2036, 2046, 2047, 2053, 2059, 2062, 2064, 2072, 2076, 2077, 2085, 2091, 2092, 2094, 2095, 2100, 2105, 2107, 2109, 2112, 2114, 2118, 2119, 2121, 2125, 2126, 2130, 2133, 2140, 2141, 2145, 2147, 2150, 2155, 2159, 2161, 2162, 2164, 2167, 2177, 2183, 2185, 2186, 2188, 2189, 2192, 2193, 2194, 2196, 2199, 2210, 2212, 2215, 2221, 2229, 2237, 2240, 2249, 2250, 2255, 2262, 2265, 2274, 2276, 2287, 2297, 2301, 2311, 2312, 2313, 2318, 2320, 2322, 2324, 2333, 2340, 2354, 2359, 2360, 2364, 2369, 2373, 2388, 2395, 2396, 2400, 2404, 2405, 2406, 2409, 2410, 2423, 2424, 2425, 2429, 2437, 2440, 2442, 2445, 2448, 2469, 2471, 2486, 2491, 2501, 2503, 2511, 2515, 2517, 2518, 2522, 2525, 2526, 2529, 2532, 2533, 2535, 2539, 2548, 2549, 2554, 2555, 2556, 2557, 2559, 2560, 2562, 2566, 2575, 2589, 2595, 2600, 2604, 2606, 2607, 2616, 2624, 2626, 2629, 2632, 2660, 
2661, 2667, 2672, 2676, 2678, 2689, 2704, 2707, 2714, 2715, 2727, 2731, 2733, 2735, 2736, 2744, 2746, 2749, 2751, 2754, 2756, 2757, 2758, 2760, 2761, 2764, 2766, 2767, 2788, 2795, 2797, 2798, 2801, 2803, 2806, 2807, 2811, 2814, 2815, 2816, 2823, 2837, 2843, 2844, 2852, 2854, 2857, 2880, 2882, 2884, 2888, 2899, 2902, 2905, 2906, 2912, 2913, 2915, 2916, 2921, 2931, 2933, 2938, 2944, 2949, 2953, 2962, 2965, 2966, 2969, 2973, 2976, 2980, 2981, 2988, 2991, 2995, 2999]}
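The "Batch output: ['不是', '是']" lines show the model answering each puzzle question with a short reply (是 / 不是, i.e. "yes" / "no"). A minimal sketch of batched generation with the chat template is given below, assuming a hypothetical list of user prompts and the system prompt visible in the logs; the actual prompt (p2) construction and batching logic live in llm_toolkit/logical_reasoning_utils.py.

```python
def batch_answer(model, tokenizer, user_prompts, max_new_tokens=8):
    """Generate short answers (e.g. 是 / 不是) for a batch of chat prompts."""
    # Decoder-only models need left padding so every prompt ends at the same position.
    tokenizer.padding_side = "left"
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    messages = [
        [
            {"role": "system", "content": "You are an expert in logical reasoning."},
            {"role": "user", "content": p},
        ]
        for p in user_prompts
    ]
    texts = [
        tokenizer.apply_chat_template(m, tokenize=False, add_generation_prompt=True)
        for m in messages
    ]
    inputs = tokenizer(texts, return_tensors="pt", padding=True).to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
    # Keep only the newly generated tokens, dropping the (left-padded) prompt.
    answers = tokenizer.batch_decode(
        outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return [a.strip() for a in answers]
```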
logs/Qwen2-72B-Instruct_epoch_7.txt CHANGED
@@ -2,17 +2,17 @@ loading env vars from: /common/home/users/d/dh.huang.2023/common2/code/logical-r
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-245 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
- (1) GPU = NVIDIA L40. Max memory = 44.309 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-245
8
- 09/08/2024 15:07:09 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
- 09/08/2024 15:07:09 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
- 09/08/2024 15:07:09 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
- 09/08/2024 15:07:09 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
- 09/08/2024 15:17:57 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
- 09/08/2024 15:17:59 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-245
14
- 09/08/2024 15:17:59 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
- (2) GPU = NVIDIA L40. Max memory = 44.309 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
@@ -154,4 +154,3 @@ You are an expert in logical reasoning.<|im_end|>
154
  <|im_start|>assistant
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
157
- Batch output: ['不是', '是']
 
2
  Adding /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning to sys.path
3
  loading /common/home/users/d/dh.huang.2023/common2/code/logical-reasoning/llm_toolkit/logical_reasoning_utils.py
4
  Qwen/Qwen2-72B-Instruct llama-factory/saves/Qwen2-72B-Instruct/checkpoint-245 True datasets/mgtv results/Qwen2-72B-Instruct_p2.csv
5
+ (1) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
6
  0.0 GB of memory reserved.
7
  loading model: Qwen/Qwen2-72B-Instruct with adapter: llama-factory/saves/Qwen2-72B-Instruct/checkpoint-245
8
+ 09/09/2024 12:46:08 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>
9
+ 09/09/2024 12:46:08 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.
10
+ 09/09/2024 12:46:08 - INFO - llamafactory.model.model_utils.quantization - Quantizing model to 4 bit with bitsandbytes.
11
+ 09/09/2024 12:46:08 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
12
+ 09/09/2024 12:56:51 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
13
+ 09/09/2024 12:56:52 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/Qwen2-72B-Instruct/checkpoint-245
14
+ 09/09/2024 12:56:52 - INFO - llamafactory.model.loader - all params: 72,811,470,848
15
+ (2) GPU = NVIDIA H100 PCIe. Max memory = 79.097 GB.
16
  43.037 GB of memory reserved.
17
  loading train/test data files
18
  DatasetDict({
 
154
  <|im_start|>assistant
155
 
156
  Evaluating model: Qwen/Qwen2-72B-Instruct
 
results/Qwen2-72B-Instruct_p2.csv CHANGED
The diff for this file is too large to render. See raw diff
 
scripts/tune-mgtv-bf16.sh CHANGED
@@ -21,4 +21,4 @@ export LOGICAL_REASONING_RESULTS_PATH=results/mgtv-results_bf16.csv
21
 
22
  $BASEDIR/scripts/tune-lf_v2.sh shenzhi-wang Llama3.1-8B-Chinese-Chat llama3
23
 
24
- $BASEDIR/scripts/tune-lf_v2.sh shenzhi-wang Mistral-7B-v0.3-Chinese-Chat mistral
 
21
 
22
  $BASEDIR/scripts/tune-lf_v2.sh shenzhi-wang Llama3.1-8B-Chinese-Chat llama3
23
 
24
+ #$BASEDIR/scripts/tune-lf_v2.sh shenzhi-wang Mistral-7B-v0.3-Chinese-Chat mistral