{"cells":[{"cell_type":"code","execution_count":3,"metadata":{"executionInfo":{"elapsed":476,"status":"ok","timestamp":1720679526275,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"uWKRSV6eZsCn"},"outputs":[],"source":["%load_ext autoreload\n","%autoreload 2"]},{"cell_type":"code","execution_count":4,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"eb33b19f-1206-41ee-84e2-e6258a12eef7","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2534,"status":"ok","timestamp":1720679529344,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"xwFh14uiZBrI","outputId":"d767799c-34c2-46a5-f052-378146a55321"},"outputs":[],"source":["from pathlib import Path\n","\n","try:\n"," from google.colab import drive\n","\n"," drive.mount(\"/content/drive\")\n"," workding_dir = \"/content/drive/MyDrive/logical-reasoning/\"\n","except ModuleNotFoundError:\n"," workding_dir = str(Path.cwd().parent)"]},{"cell_type":"code","execution_count":17,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"6d394937-6c99-4a7c-9d32-7600a280032f","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":5,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"G5pNu3zgZBrL","outputId":"160a554f-fb08-4aa0-bc00-0422fb7c1fac"},"outputs":[{"name":"stdout","output_type":"stream","text":["workding dir: /home/inflaton/code/projects/courses/logical-reasoning\n"]}],"source":["import os\n","import sys\n","from pathlib import Path\n","\n","os.chdir(workding_dir)\n","sys.path.append(workding_dir)\n","print(\"workding dir:\", workding_dir)"]},{"cell_type":"code","execution_count":4,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"ac667aba-076e-4de6-9984-8f6a67cb09cd","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"0dVRAabNZBrL","outputId":"b977e116-df16-47cd-9160-a24f611da687"},"outputs":[{"data":{"text/plain":["False"]},"execution_count":4,"metadata":{},"output_type":"execute_result"}],"source":["need_to_setup_env = False\n","need_to_setup_env"]},{"cell_type":"code","execution_count":5,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"72f9cf79-7b0d-4d9e-90a0-1fa5251b947f","showTitle":false,"title":""},"executionInfo":{"elapsed":4,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"hKUOfP2HZBrL"},"outputs":[],"source":["if need_to_setup_env:\n"," %pip install -r requirements.txt\n"," %cd /content/\n"," %rm -rf LLaMA-Factory\n"," !git clone https://github.com/hiyouga/LLaMA-Factory.git\n"," %cd LLaMA-Factory\n"," %ls\n"," %pip install -e .[torch,bitsandbytes]\n"," \n"," os.chdir(workding_dir)\n"," sys.path.append(workding_dir)\n"," print(\"workding dir:\", 
workding_dir)"]},{"cell_type":"code","execution_count":22,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"9f67ec60-2f24-411c-84eb-0dd664b44775","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"hPCC-6m7ZBrM","outputId":"c7aa2c96-5e99-440a-c148-201d79465ff9"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading env vars from: /home/inflaton/code/projects/courses/logical-reasoning/.env\n"]},{"data":{"text/plain":["True"]},"execution_count":22,"metadata":{},"output_type":"execute_result"}],"source":["from dotenv import find_dotenv, load_dotenv\n","\n","found_dotenv = find_dotenv(\".env\")\n","\n","if len(found_dotenv) == 0:\n"," found_dotenv = find_dotenv(\".env.example\")\n","print(f\"loading env vars from: {found_dotenv}\")\n","load_dotenv(found_dotenv, override=True)"]},{"cell_type":"code","execution_count":23,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"f1597656-8042-4878-9d3b-9ebfb8dd86dc","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"1M3IraVtZBrM","outputId":"29ab35f6-2970-4ade-d85d-3174acf8cda0"},"outputs":[{"name":"stdout","output_type":"stream","text":["internlm/internlm2_5-7b-chat-1m None False datasets/mgtv results/mgtv-results_h100.csv\n"]}],"source":["import os\n","\n","model_name = os.getenv(\"MODEL_NAME\")\n","adapter_name_or_path = os.getenv(\"ADAPTER_NAME_OR_PATH\")\n","load_in_4bit = os.getenv(\"LOAD_IN_4BIT\") == \"true\"\n","data_path = os.getenv(\"LOGICAL_REASONING_DATA_PATH\")\n","results_path = os.getenv(\"LOGICAL_REASONING_RESULTS_PATH\")\n","use_english_datasets = os.getenv(\"USE_ENGLISH_DATASETS\") == \"true\"\n","\n","print(model_name, adapter_name_or_path, load_in_4bit, data_path, results_path)"]},{"cell_type":"code","execution_count":8,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"e3ab54ba-7b6d-4817-bf2e-c5d711508b58","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2,"status":"ok","timestamp":1720679529345,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"mrVEz6UsZBrM","outputId":"8bcff769-2573-4dae-e124-d5b5d2382d71"},"outputs":[{"name":"stdout","output_type":"stream","text":["Sat Jul 13 15:41:52 2024 \n","+---------------------------------------------------------------------------------------+\n","| NVIDIA-SMI 545.23.07 Driver Version: 546.12 CUDA Version: 12.3 |\n","|-----------------------------------------+----------------------+----------------------+\n","| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |\n","| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |\n","| | | MIG M. |\n","|=========================================+======================+======================|\n","| 0 NVIDIA GeForce RTX 4080 ... 
On | 00000000:01:00.0 On | N/A |\n","| N/A 63C P8 6W / 150W | 632MiB / 12282MiB | 15% Default |\n","| | | N/A |\n","+-----------------------------------------+----------------------+----------------------+\n"," \n","+---------------------------------------------------------------------------------------+\n","| Processes: |\n","| GPU GI CI PID Type Process name GPU Memory |\n","| ID ID Usage |\n","|=======================================================================================|\n","| No running processes found |\n","+---------------------------------------------------------------------------------------+\n"]}],"source":["!nvidia-smi"]},{"cell_type":"code","execution_count":9,"metadata":{"application/vnd.databricks.v1+cell":{"cellMetadata":{"byteLimit":2048000,"rowLimit":10000},"inputWidgets":{},"nuid":"b2a43943-9324-4839-9a47-cfa72de2244b","showTitle":false,"title":""},"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":564,"status":"ok","timestamp":1720679529907,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"UgMvt6dIZBrM","outputId":"ce37581c-fd26-46c2-ad87-d933d99f68f7"},"outputs":[{"name":"stdout","output_type":"stream","text":["Python 3.11.9\n","\u001b[33mWARNING: Package(s) not found: flash-attn\u001b[0m\u001b[33m\n","\u001b[0mCPU times: user 14.8 ms, sys: 0 ns, total: 14.8 ms\n","Wall time: 647 ms\n"]}],"source":["%%time\n","!python --version\n","!pip show flash-attn"]},{"cell_type":"code","execution_count":10,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1685,"status":"ok","timestamp":1720679531591,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"ZuS_FsLyZBrN","outputId":"2cba0105-c505-4395-afbd-2f2fee6581d0"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading /home/inflaton/code/projects/courses/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n"]}],"source":["from llm_toolkit.logical_reasoning_utils import *"]},{"cell_type":"code","execution_count":11,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":224},"executionInfo":{"elapsed":715,"status":"ok","timestamp":1720679532304,"user":{"displayName":"HUANG DONGHAO _","userId":"00977795705617022768"},"user_tz":-480},"id":"muFDE9DpZBrN","outputId":"95672b22-99b7-41b7-f992-18b193994f66"},"outputs":[{"name":"stdout","output_type":"stream","text":["loading existing data from: llama-factory/data/alpaca_mgtv_p1.json\n"]},{"data":{"text/html":["
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
instructioninputoutput
0你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者...不是
1你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者...不是
2你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者...不重要
3你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者...不是
4你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者...
\n","
"],"text/plain":[" instruction input output\n","0 你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者... 不是\n","1 你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者... 不是\n","2 你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者... 不重要\n","3 你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者... 不是\n","4 你是一个逻辑游戏的主持人。游戏规则如下:\\n\\n1. 参与者会得到一个谜题。\\n2. 参与者... 是"]},"execution_count":11,"metadata":{},"output_type":"execute_result"}],"source":["df_alpaca = load_alpaca_data(data_path)\n","df_alpaca.head()"]},{"cell_type":"code","execution_count":12,"metadata":{"id":"L370pvGTZBrN"},"outputs":[],"source":["def evaluate_model_all_epochs(model_name, adapter_path_base, num_train_epochs, start_epoch=0, load_in_4bit=True, num_of_entries=-1):\n"," os.environ[\"MODEL_NAME\"] = model_name\n"," os.environ[\"LOAD_IN_4BIT\"] = \"true\" if load_in_4bit else \"false\"\n"," for i in range(start_epoch, num_train_epochs + 1):\n"," print(f\"Epoch {i}\")\n"," if i == 0:\n"," os.unsetenv(\"ADAPTER_NAME_OR_PATH\")\n"," else:\n"," adapter_path = f\"{adapter_path_base}/checkpoint-{44 * i}\"\n"," os.environ[\"ADAPTER_NAME_OR_PATH\"] = adapter_path\n","\n"," !python llm_toolkit/eval_logical_reasoning.py {num_of_entries}"]},{"cell_type":"code","execution_count":13,"metadata":{"id":"WUFjhxmiZBrN"},"outputs":[{"name":"stdout","output_type":"stream","text":["Epoch 1\n","loading env vars from: /home/inflaton/code/projects/courses/logical-reasoning/.env\n","Adding /home/inflaton/code/projects/courses/logical-reasoning to sys.path\n","loading /home/inflaton/code/projects/courses/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n","internlm/internlm2_5-7b-chat-1m llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-44 False datasets/mgtv results/mgtv-results_h100.csv\n","(1) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. 
Max memory = 11.994 GB.\n","0.0 GB of memory reserved.\n","loading model: internlm/internlm2_5-7b-chat-1m\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 15:42:21,524 >> loading file ./tokenizer.model from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/./tokenizer.model\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 15:42:21,524 >> loading file added_tokens.json from cache at None\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 15:42:21,524 >> loading file special_tokens_map.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/special_tokens_map.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 15:42:21,524 >> loading file tokenizer_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/tokenizer_config.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 15:42:21,524 >> loading file tokenizer.json from cache at None\n","07/13/2024 15:42:22 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>\n","07/13/2024 15:42:22 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.\n","[INFO|configuration_utils.py:733] 2024-07-13 15:42:32,885 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:733] 2024-07-13 15:42:53,246 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:800] 2024-07-13 15:42:53,247 >> Model config InternLM2Config {\n"," \"_name_or_path\": \"internlm/internlm2_5-7b-chat-1m\",\n"," \"architectures\": [\n"," \"InternLM2ForCausalLM\"\n"," ],\n"," \"attn_implementation\": \"eager\",\n"," \"auto_map\": {\n"," \"AutoConfig\": \"internlm/internlm2_5-7b-chat-1m--configuration_internlm2.InternLM2Config\",\n"," \"AutoModel\": \"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\",\n"," \"AutoModelForCausalLM\": \"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\"\n"," },\n"," \"bias\": false,\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": 2,\n"," \"hidden_act\": \"silu\",\n"," \"hidden_size\": 4096,\n"," \"initializer_range\": 0.02,\n"," \"intermediate_size\": 14336,\n"," \"max_position_embeddings\": 262144,\n"," \"model_type\": \"internlm2\",\n"," \"num_attention_heads\": 32,\n"," \"num_hidden_layers\": 32,\n"," \"num_key_value_heads\": 8,\n"," \"pad_token_id\": 2,\n"," \"pretraining_tp\": 1,\n"," \"rms_norm_eps\": 1e-05,\n"," \"rope_scaling\": {\n"," \"factor\": 2.5,\n"," \"type\": \"dynamic\"\n"," },\n"," \"rope_theta\": 50000000,\n"," \"tie_word_embeddings\": false,\n"," \"torch_dtype\": \"bfloat16\",\n"," \"transformers_version\": \"4.42.3\",\n"," \"use_cache\": true,\n"," \"vocab_size\": 92544\n","}\n","\n","07/13/2024 15:42:53 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.\n","[INFO|modeling_utils.py:3556] 2024-07-13 15:43:03,558 >> loading weights file model.safetensors from cache at 
/home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/model.safetensors.index.json\n","[INFO|modeling_utils.py:1531] 2024-07-13 15:43:03,803 >> Instantiating InternLM2ForCausalLM model under default dtype torch.bfloat16.\n","[INFO|configuration_utils.py:1000] 2024-07-13 15:43:03,804 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": 2,\n"," \"pad_token_id\": 2\n","}\n","\n","Loading checkpoint shards: 100%|█████████████████| 8/8 [25:24<00:00, 190.62s/it]\n","[INFO|modeling_utils.py:4364] 2024-07-13 16:08:28,967 >> All model checkpoint weights were used when initializing InternLM2ForCausalLM.\n","\n","[INFO|modeling_utils.py:4372] 2024-07-13 16:08:28,967 >> All the weights of InternLM2ForCausalLM were initialized from the model checkpoint at internlm/internlm2_5-7b-chat-1m.\n","If your task is similar to the task the model of the checkpoint was trained on, you can already use InternLM2ForCausalLM for predictions without further training.\n","[INFO|configuration_utils.py:955] 2024-07-13 16:08:39,003 >> loading configuration file generation_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/generation_config.json\n","[INFO|configuration_utils.py:1000] 2024-07-13 16:08:39,003 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": [\n"," 2,\n"," 92542\n"," ],\n"," \"pad_token_id\": 2\n","}\n","\n","WARNING:root:Some parameters are on the meta device device because they were offloaded to the cpu.\n","07/13/2024 16:08:39 - INFO - llamafactory.model.model_utils.attention - Using vanilla attention implementation.\n","INFO:llamafactory.model.model_utils.attention:Using vanilla attention implementation.\n","WARNING:root:Some parameters are on the meta device device because they were offloaded to the cpu.\n","07/13/2024 16:24:10 - INFO - llamafactory.model.adapter - Merged 1 adapter(s).\n","INFO:llamafactory.model.adapter:Merged 1 adapter(s).\n","07/13/2024 16:24:10 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-44\n","INFO:llamafactory.model.adapter:Loaded adapter(s): llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-44\n","07/13/2024 16:24:10 - INFO - llamafactory.model.loader - all params: 7,737,708,544\n","INFO:llamafactory.model.loader:all params: 7,737,708,544\n","(2) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. 
Max memory = 11.994 GB.\n","14.451 GB of memory reserved.\n","loading train/test data files\n","DatasetDict({\n"," train: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 25000\n"," })\n"," test: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 3000\n"," })\n","})\n","--------------------------------------------------\n","text: 甄加索是自杀吗\n","--------------------------------------------------\n","label: 不是\n","--------------------------------------------------\n","answer: nan\n","--------------------------------------------------\n","title: 海岸之谜\n","--------------------------------------------------\n","puzzle: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","--------------------------------------------------\n","truth: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","--------------------------------------------------\n","train_text: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","\n","实际情况: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","\n","参与者提出的问题: 甄加索是自杀吗\n","<|im_end|>\n","<|im_start|>assistant\n","不是<|im_end|>\n","--------------------------------------------------\n","prompt: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 
参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","\n","实际情况: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","\n","参与者提出的问题: 甄加索是自杀吗\n","<|im_end|>\n","<|im_start|>assistant\n","\n","--------------------------------------------------\n","text: 死者受伤了吗\n","--------------------------------------------------\n","label: 不是\n","--------------------------------------------------\n","answer: nan\n","--------------------------------------------------\n","title: 甄庄哭声\n","--------------------------------------------------\n","puzzle: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","--------------------------------------------------\n","truth: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","--------------------------------------------------\n","train_text: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","实际情况: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","参与者提出的问题: 死者受伤了吗\n","<|im_end|>\n","<|im_start|>assistant\n","不是<|im_end|>\n","--------------------------------------------------\n","prompt: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","实际情况: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","参与者提出的问题: 死者受伤了吗\n","<|im_end|>\n","<|im_start|>assistant\n","\n","Evaluating model: internlm/internlm2_5-7b-chat-1m\n"," 0%| | 0/3000 [00:00\n","--------\n","step 2: 不是\n","--------\n","step 3: 不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n","100%|█████████████████████████████████████| 3000/3000 [3:33:36<00:00, 4.27s/it]\n","(3) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. Max memory = 11.994 GB.\n","14.451 GB of memory reserved.\n"," text ... internlm/internlm2_5-7b-chat-1m_checkpoint-44\n","0 甄加索是自杀吗 ... 
不是\n","\n","[1 rows x 6 columns]\n","{'accuracy': 0.7616666666666667, 'incorrect_ids': [9, 11, 12, 13, 24, 27, 29, 31, 34, 36, 55, 59, 61, 65, 66, 67, 78, 81, 83, 88, 93, 97, 103, 104, 106, 108, 110, 112, 120, 121, 123, 128, 129, 135, 138, 139, 149, 150, 153, 155, 161, 164, 170, 173, 179, 190, 199, 200, 202, 218, 224, 228, 234, 245, 250, 252, 253, 259, 260, 261, 269, 271, 275, 276, 279, 284, 286, 299, 301, 304, 314, 318, 321, 323, 328, 330, 334, 335, 337, 341, 342, 346, 350, 352, 353, 355, 356, 360, 361, 362, 370, 371, 373, 376, 377, 383, 386, 389, 395, 397, 410, 416, 428, 429, 430, 432, 438, 440, 445, 447, 450, 452, 454, 456, 457, 458, 471, 472, 473, 476, 479, 480, 490, 492, 493, 494, 495, 497, 501, 502, 506, 507, 508, 510, 511, 514, 517, 519, 520, 530, 536, 540, 560, 566, 570, 571, 579, 581, 589, 591, 592, 593, 596, 597, 601, 609, 612, 613, 614, 621, 622, 625, 628, 632, 636, 643, 644, 647, 665, 666, 671, 678, 680, 682, 684, 689, 695, 702, 705, 708, 709, 721, 727, 729, 730, 732, 734, 739, 740, 754, 758, 760, 766, 770, 771, 772, 774, 778, 785, 791, 794, 798, 801, 803, 805, 808, 809, 810, 814, 817, 818, 819, 820, 821, 822, 823, 824, 833, 837, 840, 844, 847, 857, 859, 861, 862, 866, 869, 870, 875, 876, 884, 889, 890, 899, 901, 904, 913, 927, 935, 937, 945, 952, 958, 962, 964, 966, 969, 970, 980, 982, 986, 989, 991, 993, 994, 998, 1001, 1003, 1005, 1006, 1011, 1012, 1014, 1015, 1019, 1020, 1022, 1031, 1036, 1038, 1040, 1043, 1051, 1053, 1056, 1066, 1069, 1071, 1075, 1076, 1078, 1080, 1083, 1087, 1089, 1091, 1096, 1107, 1111, 1116, 1120, 1125, 1126, 1129, 1138, 1163, 1164, 1166, 1170, 1173, 1174, 1177, 1178, 1180, 1181, 1183, 1185, 1198, 1203, 1212, 1228, 1232, 1239, 1240, 1241, 1251, 1252, 1254, 1258, 1259, 1266, 1282, 1289, 1300, 1305, 1308, 1311, 1313, 1314, 1315, 1317, 1323, 1324, 1326, 1331, 1337, 1339, 1342, 1349, 1353, 1357, 1363, 1364, 1367, 1370, 1380, 1385, 1386, 1387, 1388, 1389, 1391, 1392, 1393, 1395, 1406, 1407, 1417, 1420, 1422, 1425, 1426, 1430, 1438, 1440, 1443, 1444, 1445, 1448, 1450, 1451, 1453, 1454, 1455, 1457, 1459, 1462, 1468, 1469, 1476, 1478, 1486, 1490, 1493, 1494, 1496, 1501, 1517, 1518, 1525, 1526, 1533, 1544, 1547, 1556, 1559, 1560, 1573, 1581, 1585, 1586, 1587, 1590, 1593, 1596, 1602, 1603, 1604, 1605, 1613, 1622, 1624, 1627, 1633, 1636, 1637, 1641, 1645, 1647, 1648, 1650, 1654, 1655, 1658, 1659, 1668, 1669, 1672, 1673, 1674, 1676, 1679, 1683, 1686, 1695, 1712, 1716, 1718, 1726, 1727, 1734, 1736, 1751, 1755, 1756, 1758, 1768, 1773, 1780, 1786, 1790, 1791, 1796, 1799, 1812, 1816, 1827, 1835, 1841, 1849, 1854, 1858, 1860, 1867, 1869, 1879, 1888, 1907, 1909, 1914, 1933, 1944, 1945, 1953, 1958, 1959, 1963, 1964, 1973, 1978, 1981, 1984, 1985, 1989, 1990, 1991, 1992, 1994, 1995, 1998, 2001, 2007, 2017, 2021, 2022, 2028, 2035, 2036, 2038, 2049, 2054, 2062, 2064, 2072, 2076, 2077, 2089, 2092, 2094, 2100, 2106, 2109, 2110, 2112, 2118, 2119, 2121, 2126, 2133, 2135, 2145, 2147, 2150, 2161, 2162, 2164, 2169, 2177, 2181, 2185, 2186, 2188, 2189, 2193, 2194, 2195, 2197, 2199, 2208, 2212, 2215, 2222, 2223, 2226, 2230, 2233, 2237, 2240, 2247, 2249, 2250, 2253, 2257, 2261, 2262, 2265, 2274, 2281, 2285, 2297, 2304, 2311, 2312, 2319, 2320, 2322, 2324, 2330, 2335, 2344, 2348, 2354, 2356, 2360, 2364, 2366, 2378, 2380, 2388, 2395, 2400, 2404, 2409, 2410, 2423, 2424, 2425, 2429, 2437, 2440, 2441, 2442, 2445, 2446, 2448, 2463, 2469, 2471, 2484, 2488, 2491, 2493, 2512, 2515, 2517, 2522, 2529, 2530, 2532, 2535, 2537, 2538, 2539, 2546, 2547, 2548, 2549, 2555, 2556, 2559, 2560, 2562, 2563, 
2565, 2575, 2589, 2590, 2594, 2600, 2606, 2607, 2610, 2616, 2624, 2626, 2629, 2632, 2635, 2644, 2660, 2663, 2672, 2676, 2681, 2685, 2686, 2707, 2710, 2714, 2716, 2731, 2736, 2742, 2744, 2746, 2749, 2751, 2754, 2757, 2760, 2761, 2762, 2764, 2766, 2770, 2777, 2788, 2797, 2798, 2803, 2806, 2807, 2812, 2815, 2816, 2818, 2820, 2823, 2837, 2841, 2843, 2845, 2854, 2857, 2861, 2877, 2880, 2882, 2884, 2888, 2896, 2899, 2905, 2912, 2913, 2915, 2916, 2921, 2927, 2933, 2937, 2944, 2949, 2953, 2975, 2977, 2979, 2980, 2981, 2985, 2991, 2995, 2998, 2999]}\n","Epoch 2\n","loading env vars from: /home/inflaton/code/projects/courses/logical-reasoning/.env\n","Adding /home/inflaton/code/projects/courses/logical-reasoning to sys.path\n","loading /home/inflaton/code/projects/courses/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n","internlm/internlm2_5-7b-chat-1m llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-88 False datasets/mgtv results/mgtv-results_h100.csv\n","(1) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. Max memory = 11.994 GB.\n","0.0 GB of memory reserved.\n","loading model: internlm/internlm2_5-7b-chat-1m\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 19:58:37,675 >> loading file ./tokenizer.model from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/./tokenizer.model\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 19:58:37,675 >> loading file added_tokens.json from cache at None\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 19:58:37,675 >> loading file special_tokens_map.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/special_tokens_map.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 19:58:37,675 >> loading file tokenizer_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/tokenizer_config.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-13 19:58:37,675 >> loading file tokenizer.json from cache at None\n","07/13/2024 19:58:39 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>\n","07/13/2024 19:58:39 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.\n","[INFO|configuration_utils.py:733] 2024-07-13 19:58:49,814 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:733] 2024-07-13 19:59:10,596 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:800] 2024-07-13 19:59:10,596 >> Model config InternLM2Config {\n"," \"_name_or_path\": \"internlm/internlm2_5-7b-chat-1m\",\n"," \"architectures\": [\n"," \"InternLM2ForCausalLM\"\n"," ],\n"," \"attn_implementation\": \"eager\",\n"," \"auto_map\": {\n"," \"AutoConfig\": \"internlm/internlm2_5-7b-chat-1m--configuration_internlm2.InternLM2Config\",\n"," \"AutoModel\": \"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\",\n"," \"AutoModelForCausalLM\": \"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\"\n"," },\n"," \"bias\": false,\n"," \"bos_token_id\": 1,\n"," 
\"eos_token_id\": 2,\n"," \"hidden_act\": \"silu\",\n"," \"hidden_size\": 4096,\n"," \"initializer_range\": 0.02,\n"," \"intermediate_size\": 14336,\n"," \"max_position_embeddings\": 262144,\n"," \"model_type\": \"internlm2\",\n"," \"num_attention_heads\": 32,\n"," \"num_hidden_layers\": 32,\n"," \"num_key_value_heads\": 8,\n"," \"pad_token_id\": 2,\n"," \"pretraining_tp\": 1,\n"," \"rms_norm_eps\": 1e-05,\n"," \"rope_scaling\": {\n"," \"factor\": 2.5,\n"," \"type\": \"dynamic\"\n"," },\n"," \"rope_theta\": 50000000,\n"," \"tie_word_embeddings\": false,\n"," \"torch_dtype\": \"bfloat16\",\n"," \"transformers_version\": \"4.42.3\",\n"," \"use_cache\": true,\n"," \"vocab_size\": 92544\n","}\n","\n","07/13/2024 19:59:10 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.\n","[INFO|modeling_utils.py:3556] 2024-07-13 19:59:22,038 >> loading weights file model.safetensors from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/model.safetensors.index.json\n","[INFO|modeling_utils.py:1531] 2024-07-13 19:59:22,721 >> Instantiating InternLM2ForCausalLM model under default dtype torch.bfloat16.\n","[INFO|configuration_utils.py:1000] 2024-07-13 19:59:22,722 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": 2,\n"," \"pad_token_id\": 2\n","}\n","\n","Loading checkpoint shards: 100%|█████████████████| 8/8 [41:38<00:00, 312.33s/it]\n","[INFO|modeling_utils.py:4364] 2024-07-13 20:41:01,526 >> All model checkpoint weights were used when initializing InternLM2ForCausalLM.\n","\n","[INFO|modeling_utils.py:4372] 2024-07-13 20:41:01,526 >> All the weights of InternLM2ForCausalLM were initialized from the model checkpoint at internlm/internlm2_5-7b-chat-1m.\n","If your task is similar to the task the model of the checkpoint was trained on, you can already use InternLM2ForCausalLM for predictions without further training.\n","[INFO|configuration_utils.py:955] 2024-07-13 20:41:11,551 >> loading configuration file generation_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/generation_config.json\n","[INFO|configuration_utils.py:1000] 2024-07-13 20:41:11,551 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": [\n"," 2,\n"," 92542\n"," ],\n"," \"pad_token_id\": 2\n","}\n","\n","WARNING:root:Some parameters are on the meta device device because they were offloaded to the cpu.\n","07/13/2024 20:41:11 - INFO - llamafactory.model.model_utils.attention - Using vanilla attention implementation.\n","INFO:llamafactory.model.model_utils.attention:Using vanilla attention implementation.\n","WARNING:root:Some parameters are on the meta device device because they were offloaded to the cpu.\n","07/13/2024 20:47:02 - INFO - llamafactory.model.adapter - Merged 1 adapter(s).\n","INFO:llamafactory.model.adapter:Merged 1 adapter(s).\n","07/13/2024 20:47:02 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-88\n","INFO:llamafactory.model.adapter:Loaded adapter(s): llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-88\n","07/13/2024 20:47:02 - INFO - llamafactory.model.loader - all params: 7,737,708,544\n","INFO:llamafactory.model.loader:all params: 7,737,708,544\n","(2) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. 
Max memory = 11.994 GB.\n","14.451 GB of memory reserved.\n","loading train/test data files\n","DatasetDict({\n"," train: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 25000\n"," })\n"," test: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 3000\n"," })\n","})\n","--------------------------------------------------\n","text: 甄加索是自杀吗\n","--------------------------------------------------\n","label: 不是\n","--------------------------------------------------\n","answer: nan\n","--------------------------------------------------\n","title: 海岸之谜\n","--------------------------------------------------\n","puzzle: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","--------------------------------------------------\n","truth: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","--------------------------------------------------\n","train_text: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","\n","实际情况: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","\n","参与者提出的问题: 甄加索是自杀吗\n","<|im_end|>\n","<|im_start|>assistant\n","不是<|im_end|>\n","--------------------------------------------------\n","prompt: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 
参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","\n","实际情况: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","\n","参与者提出的问题: 甄加索是自杀吗\n","<|im_end|>\n","<|im_start|>assistant\n","\n","--------------------------------------------------\n","text: 死者受伤了吗\n","--------------------------------------------------\n","label: 不是\n","--------------------------------------------------\n","answer: nan\n","--------------------------------------------------\n","title: 甄庄哭声\n","--------------------------------------------------\n","puzzle: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","--------------------------------------------------\n","truth: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","--------------------------------------------------\n","train_text: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","实际情况: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","参与者提出的问题: 死者受伤了吗\n","<|im_end|>\n","<|im_start|>assistant\n","不是<|im_end|>\n","--------------------------------------------------\n","prompt: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","实际情况: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","参与者提出的问题: 死者受伤了吗\n","<|im_end|>\n","<|im_start|>assistant\n","\n","Evaluating model: internlm/internlm2_5-7b-chat-1m\n"," 0%| | 0/3000 [00:00\n","--------\n","step 2: 不是\n","--------\n","step 3: 不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n","100%|█████████████████████████████████████| 3000/3000 [4:01:51<00:00, 4.84s/it]\n","(3) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. Max memory = 11.994 GB.\n","14.451 GB of memory reserved.\n"," text ... internlm/internlm2_5-7b-chat-1m_checkpoint-88\n","0 甄加索是自杀吗 ... 
不是\n","\n","[1 rows x 7 columns]\n","{'accuracy': 0.7413333333333333, 'incorrect_ids': [9, 11, 12, 13, 24, 29, 31, 34, 36, 38, 55, 58, 59, 61, 65, 66, 67, 78, 81, 82, 83, 88, 93, 96, 97, 103, 104, 106, 108, 112, 115, 117, 119, 120, 121, 123, 128, 129, 135, 137, 138, 139, 149, 150, 153, 155, 161, 163, 164, 170, 171, 173, 179, 190, 199, 200, 218, 222, 224, 225, 226, 227, 228, 234, 245, 250, 251, 253, 259, 260, 265, 269, 271, 275, 276, 284, 286, 299, 301, 304, 311, 314, 318, 321, 323, 330, 334, 335, 337, 338, 341, 342, 346, 350, 352, 353, 354, 355, 356, 357, 360, 361, 368, 370, 371, 373, 374, 377, 383, 386, 389, 395, 397, 410, 416, 421, 428, 429, 430, 432, 438, 440, 447, 450, 452, 454, 456, 458, 461, 471, 472, 473, 474, 476, 479, 480, 488, 490, 493, 494, 495, 497, 501, 502, 504, 506, 507, 508, 510, 511, 514, 515, 517, 518, 519, 520, 530, 536, 540, 553, 560, 566, 571, 579, 581, 589, 591, 593, 596, 597, 601, 612, 613, 614, 621, 625, 626, 628, 632, 636, 643, 644, 647, 649, 650, 663, 665, 666, 671, 678, 680, 682, 684, 686, 689, 695, 702, 705, 708, 709, 711, 721, 727, 729, 730, 734, 739, 740, 754, 758, 766, 770, 771, 772, 774, 778, 785, 791, 794, 798, 801, 803, 805, 808, 809, 810, 817, 818, 819, 820, 821, 822, 823, 824, 833, 837, 840, 844, 847, 857, 859, 861, 862, 866, 869, 870, 875, 876, 884, 888, 889, 890, 899, 901, 904, 912, 913, 927, 935, 937, 945, 952, 962, 964, 966, 969, 970, 980, 982, 986, 989, 991, 993, 994, 998, 1001, 1003, 1005, 1006, 1007, 1011, 1012, 1014, 1022, 1031, 1036, 1040, 1043, 1045, 1051, 1053, 1061, 1066, 1069, 1071, 1075, 1076, 1078, 1080, 1083, 1087, 1096, 1098, 1107, 1111, 1116, 1120, 1125, 1126, 1129, 1138, 1153, 1161, 1163, 1164, 1166, 1167, 1170, 1172, 1173, 1174, 1177, 1178, 1180, 1181, 1185, 1203, 1212, 1228, 1232, 1239, 1240, 1241, 1242, 1243, 1251, 1252, 1254, 1256, 1258, 1259, 1266, 1282, 1289, 1296, 1300, 1305, 1308, 1311, 1313, 1314, 1315, 1317, 1323, 1324, 1326, 1331, 1332, 1337, 1349, 1353, 1357, 1363, 1364, 1367, 1369, 1370, 1380, 1385, 1386, 1387, 1388, 1389, 1391, 1392, 1393, 1395, 1400, 1406, 1407, 1409, 1413, 1417, 1420, 1422, 1425, 1426, 1430, 1438, 1440, 1443, 1444, 1445, 1448, 1450, 1451, 1453, 1454, 1455, 1457, 1458, 1459, 1462, 1469, 1470, 1476, 1478, 1486, 1490, 1493, 1494, 1496, 1501, 1512, 1517, 1518, 1525, 1526, 1528, 1533, 1536, 1544, 1547, 1554, 1556, 1559, 1560, 1573, 1581, 1585, 1586, 1587, 1590, 1593, 1596, 1602, 1603, 1604, 1605, 1613, 1622, 1624, 1627, 1632, 1633, 1636, 1637, 1641, 1645, 1647, 1650, 1654, 1655, 1658, 1659, 1668, 1669, 1672, 1673, 1674, 1676, 1679, 1683, 1686, 1690, 1695, 1700, 1712, 1716, 1718, 1726, 1727, 1734, 1736, 1751, 1755, 1756, 1758, 1773, 1780, 1786, 1791, 1796, 1799, 1801, 1804, 1809, 1812, 1816, 1820, 1827, 1833, 1835, 1841, 1848, 1849, 1854, 1858, 1860, 1867, 1869, 1872, 1879, 1888, 1897, 1914, 1915, 1933, 1944, 1945, 1949, 1953, 1958, 1963, 1964, 1973, 1978, 1981, 1984, 1989, 1990, 1991, 1992, 1994, 1995, 1998, 2001, 2007, 2010, 2015, 2017, 2021, 2025, 2029, 2035, 2036, 2037, 2038, 2046, 2049, 2052, 2054, 2059, 2062, 2063, 2064, 2072, 2076, 2077, 2092, 2094, 2100, 2105, 2106, 2109, 2110, 2112, 2118, 2119, 2121, 2126, 2130, 2133, 2135, 2139, 2141, 2145, 2147, 2161, 2162, 2164, 2167, 2169, 2177, 2181, 2185, 2186, 2188, 2189, 2193, 2194, 2195, 2197, 2199, 2208, 2209, 2212, 2215, 2222, 2223, 2226, 2230, 2233, 2237, 2240, 2243, 2246, 2247, 2249, 2250, 2253, 2254, 2257, 2261, 2262, 2265, 2274, 2280, 2281, 2285, 2304, 2311, 2312, 2313, 2318, 2319, 2320, 2322, 2324, 2334, 2335, 2344, 2348, 2354, 2359, 2360, 2364, 2366, 
2378, 2388, 2389, 2395, 2400, 2404, 2406, 2409, 2410, 2412, 2423, 2424, 2425, 2429, 2437, 2440, 2441, 2442, 2445, 2446, 2448, 2463, 2469, 2484, 2488, 2491, 2493, 2499, 2501, 2512, 2515, 2517, 2522, 2529, 2530, 2532, 2535, 2537, 2538, 2539, 2546, 2547, 2548, 2549, 2553, 2554, 2555, 2556, 2559, 2560, 2562, 2563, 2565, 2574, 2575, 2589, 2590, 2594, 2602, 2606, 2607, 2610, 2616, 2623, 2624, 2626, 2629, 2632, 2635, 2644, 2660, 2663, 2672, 2676, 2681, 2707, 2710, 2714, 2715, 2716, 2731, 2736, 2742, 2744, 2746, 2749, 2751, 2754, 2756, 2757, 2760, 2761, 2762, 2764, 2766, 2767, 2770, 2777, 2788, 2798, 2801, 2803, 2806, 2807, 2812, 2815, 2816, 2820, 2823, 2837, 2841, 2843, 2845, 2854, 2857, 2861, 2877, 2880, 2882, 2884, 2888, 2899, 2902, 2905, 2912, 2913, 2915, 2916, 2919, 2921, 2927, 2933, 2937, 2944, 2949, 2953, 2962, 2963, 2975, 2979, 2980, 2981, 2983, 2985, 2991, 2995, 2998, 2999]}\n","Epoch 3\n","loading env vars from: /home/inflaton/code/projects/courses/logical-reasoning/.env\n","Adding /home/inflaton/code/projects/courses/logical-reasoning to sys.path\n","loading /home/inflaton/code/projects/courses/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n","internlm/internlm2_5-7b-chat-1m llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-132 False datasets/mgtv results/mgtv-results_h100.csv\n","(1) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. Max memory = 11.994 GB.\n","0.0 GB of memory reserved.\n","loading model: internlm/internlm2_5-7b-chat-1m\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 00:49:37,357 >> loading file ./tokenizer.model from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/./tokenizer.model\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 00:49:37,357 >> loading file added_tokens.json from cache at None\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 00:49:37,357 >> loading file special_tokens_map.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/special_tokens_map.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 00:49:37,357 >> loading file tokenizer_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/tokenizer_config.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 00:49:37,358 >> loading file tokenizer.json from cache at None\n","07/14/2024 00:49:38 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>\n","07/14/2024 00:49:38 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.\n","[INFO|configuration_utils.py:733] 2024-07-14 00:49:48,953 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:733] 2024-07-14 00:50:09,339 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:800] 2024-07-14 00:50:09,339 >> Model config InternLM2Config {\n"," \"_name_or_path\": \"internlm/internlm2_5-7b-chat-1m\",\n"," \"architectures\": [\n"," \"InternLM2ForCausalLM\"\n"," ],\n"," \"attn_implementation\": \"eager\",\n"," \"auto_map\": {\n"," \"AutoConfig\": 
\"internlm/internlm2_5-7b-chat-1m--configuration_internlm2.InternLM2Config\",\n"," \"AutoModel\": \"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\",\n"," \"AutoModelForCausalLM\": \"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\"\n"," },\n"," \"bias\": false,\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": 2,\n"," \"hidden_act\": \"silu\",\n"," \"hidden_size\": 4096,\n"," \"initializer_range\": 0.02,\n"," \"intermediate_size\": 14336,\n"," \"max_position_embeddings\": 262144,\n"," \"model_type\": \"internlm2\",\n"," \"num_attention_heads\": 32,\n"," \"num_hidden_layers\": 32,\n"," \"num_key_value_heads\": 8,\n"," \"pad_token_id\": 2,\n"," \"pretraining_tp\": 1,\n"," \"rms_norm_eps\": 1e-05,\n"," \"rope_scaling\": {\n"," \"factor\": 2.5,\n"," \"type\": \"dynamic\"\n"," },\n"," \"rope_theta\": 50000000,\n"," \"tie_word_embeddings\": false,\n"," \"torch_dtype\": \"bfloat16\",\n"," \"transformers_version\": \"4.42.3\",\n"," \"use_cache\": true,\n"," \"vocab_size\": 92544\n","}\n","\n","07/14/2024 00:50:09 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.\n","[INFO|modeling_utils.py:3556] 2024-07-14 00:50:19,681 >> loading weights file model.safetensors from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/model.safetensors.index.json\n","[INFO|modeling_utils.py:1531] 2024-07-14 00:50:19,848 >> Instantiating InternLM2ForCausalLM model under default dtype torch.bfloat16.\n","[INFO|configuration_utils.py:1000] 2024-07-14 00:50:19,848 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": 2,\n"," \"pad_token_id\": 2\n","}\n","\n","Loading checkpoint shards: 100%|██████████████████| 8/8 [09:56<00:00, 74.50s/it]\n","[INFO|modeling_utils.py:4364] 2024-07-14 01:00:16,038 >> All model checkpoint weights were used when initializing InternLM2ForCausalLM.\n","\n","[INFO|modeling_utils.py:4372] 2024-07-14 01:00:16,038 >> All the weights of InternLM2ForCausalLM were initialized from the model checkpoint at internlm/internlm2_5-7b-chat-1m.\n","If your task is similar to the task the model of the checkpoint was trained on, you can already use InternLM2ForCausalLM for predictions without further training.\n","[INFO|configuration_utils.py:955] 2024-07-14 01:00:16,567 >> loading configuration file generation_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/generation_config.json\n","[INFO|configuration_utils.py:1000] 2024-07-14 01:00:16,568 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": [\n"," 2,\n"," 92542\n"," ],\n"," \"pad_token_id\": 2\n","}\n","\n","WARNING:root:Some parameters are on the meta device device because they were offloaded to the cpu.\n","07/14/2024 01:00:16 - INFO - llamafactory.model.model_utils.attention - Using vanilla attention implementation.\n","INFO:llamafactory.model.model_utils.attention:Using vanilla attention implementation.\n","WARNING:root:Some parameters are on the meta device device because they were offloaded to the cpu.\n","07/14/2024 01:05:49 - INFO - llamafactory.model.adapter - Merged 1 adapter(s).\n","INFO:llamafactory.model.adapter:Merged 1 adapter(s).\n","07/14/2024 01:05:49 - INFO - llamafactory.model.adapter - Loaded adapter(s): 
llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-132\n","INFO:llamafactory.model.adapter:Loaded adapter(s): llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-132\n","07/14/2024 01:05:49 - INFO - llamafactory.model.loader - all params: 7,737,708,544\n","INFO:llamafactory.model.loader:all params: 7,737,708,544\n","(2) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. Max memory = 11.994 GB.\n","14.451 GB of memory reserved.\n","loading train/test data files\n","DatasetDict({\n"," train: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 25000\n"," })\n"," test: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 3000\n"," })\n","})\n","--------------------------------------------------\n","text: 甄加索是自杀吗\n","--------------------------------------------------\n","label: 不是\n","--------------------------------------------------\n","answer: nan\n","--------------------------------------------------\n","title: 海岸之谜\n","--------------------------------------------------\n","puzzle: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","--------------------------------------------------\n","truth: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","--------------------------------------------------\n","train_text: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","\n","实际情况: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","\n","参与者提出的问题: 甄加索是自杀吗\n","<|im_end|>\n","<|im_start|>assistant\n","不是<|im_end|>\n","--------------------------------------------------\n","prompt: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 
参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","\n","实际情况: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","\n","参与者提出的问题: 甄加索是自杀吗\n","<|im_end|>\n","<|im_start|>assistant\n","\n","--------------------------------------------------\n","text: 死者受伤了吗\n","--------------------------------------------------\n","label: 不是\n","--------------------------------------------------\n","answer: nan\n","--------------------------------------------------\n","title: 甄庄哭声\n","--------------------------------------------------\n","puzzle: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","--------------------------------------------------\n","truth: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","--------------------------------------------------\n","train_text: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","实际情况: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","参与者提出的问题: 死者受伤了吗\n","<|im_end|>\n","<|im_start|>assistant\n","不是<|im_end|>\n","--------------------------------------------------\n","prompt: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","实际情况: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","参与者提出的问题: 死者受伤了吗\n","<|im_end|>\n","<|im_start|>assistant\n","\n","Evaluating model: internlm/internlm2_5-7b-chat-1m\n"," 0%| | 0/3000 [00:00\n","--------\n","step 2: 不是\n","--------\n","step 3: 不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n","100%|█████████████████████████████████████| 3000/3000 [3:42:30<00:00, 4.45s/it]\n","(3) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. Max memory = 11.994 GB.\n","14.451 GB of memory reserved.\n"," text ... internlm/internlm2_5-7b-chat-1m_checkpoint-132\n","0 甄加索是自杀吗 ... 
不是\n","\n","[1 rows x 8 columns]\n","{'accuracy': 0.755, 'incorrect_ids': [6, 9, 11, 13, 24, 29, 31, 33, 34, 36, 55, 58, 59, 61, 65, 66, 67, 78, 81, 83, 88, 93, 94, 96, 97, 104, 106, 108, 112, 117, 119, 120, 121, 123, 128, 129, 135, 137, 138, 139, 149, 150, 153, 161, 163, 164, 171, 173, 190, 199, 200, 207, 218, 222, 224, 226, 228, 234, 245, 248, 250, 251, 253, 259, 260, 265, 269, 271, 275, 276, 283, 284, 286, 299, 303, 304, 311, 314, 318, 321, 323, 330, 334, 335, 338, 341, 342, 346, 347, 350, 352, 353, 355, 356, 357, 360, 361, 368, 371, 373, 374, 377, 383, 386, 389, 391, 395, 397, 404, 410, 416, 421, 428, 429, 430, 432, 438, 440, 452, 454, 456, 457, 458, 471, 472, 473, 476, 480, 488, 492, 493, 494, 495, 497, 501, 502, 506, 507, 508, 510, 511, 514, 515, 517, 518, 520, 528, 530, 536, 537, 540, 553, 560, 561, 566, 570, 571, 579, 581, 589, 591, 593, 596, 597, 601, 610, 612, 613, 614, 621, 625, 628, 632, 636, 643, 644, 647, 649, 650, 657, 663, 666, 671, 678, 680, 682, 689, 695, 702, 705, 708, 709, 711, 721, 727, 729, 730, 732, 734, 739, 740, 754, 758, 766, 769, 770, 771, 772, 774, 778, 791, 794, 798, 800, 801, 803, 805, 809, 810, 817, 818, 819, 820, 821, 822, 823, 824, 833, 837, 840, 842, 844, 847, 856, 857, 859, 861, 862, 866, 869, 870, 875, 884, 888, 889, 890, 899, 901, 904, 909, 913, 927, 935, 937, 943, 945, 952, 962, 964, 966, 969, 970, 980, 982, 986, 991, 993, 994, 998, 1001, 1003, 1005, 1006, 1007, 1011, 1012, 1014, 1019, 1022, 1036, 1040, 1045, 1046, 1051, 1053, 1066, 1069, 1075, 1076, 1080, 1083, 1087, 1096, 1098, 1111, 1116, 1120, 1125, 1126, 1129, 1138, 1153, 1154, 1161, 1163, 1164, 1166, 1167, 1173, 1174, 1177, 1178, 1180, 1181, 1183, 1185, 1203, 1212, 1228, 1232, 1239, 1240, 1241, 1242, 1243, 1251, 1252, 1254, 1256, 1258, 1259, 1262, 1266, 1282, 1289, 1300, 1301, 1305, 1308, 1311, 1313, 1314, 1315, 1317, 1324, 1332, 1333, 1337, 1339, 1345, 1349, 1353, 1357, 1363, 1364, 1367, 1369, 1370, 1380, 1385, 1386, 1387, 1388, 1389, 1391, 1392, 1393, 1395, 1400, 1406, 1407, 1413, 1417, 1420, 1422, 1425, 1426, 1427, 1430, 1438, 1440, 1443, 1444, 1445, 1448, 1450, 1451, 1453, 1454, 1455, 1457, 1459, 1462, 1463, 1469, 1470, 1476, 1478, 1486, 1493, 1494, 1496, 1501, 1512, 1515, 1517, 1518, 1525, 1526, 1528, 1533, 1544, 1547, 1556, 1559, 1560, 1562, 1572, 1573, 1581, 1585, 1586, 1587, 1590, 1593, 1596, 1602, 1603, 1604, 1605, 1613, 1622, 1624, 1627, 1632, 1633, 1637, 1641, 1645, 1647, 1648, 1654, 1655, 1658, 1659, 1668, 1669, 1672, 1673, 1674, 1676, 1679, 1683, 1686, 1690, 1691, 1695, 1700, 1712, 1716, 1726, 1727, 1734, 1751, 1755, 1756, 1770, 1773, 1780, 1786, 1791, 1796, 1799, 1804, 1809, 1812, 1816, 1827, 1833, 1835, 1841, 1848, 1849, 1858, 1860, 1867, 1869, 1879, 1888, 1907, 1913, 1914, 1915, 1935, 1944, 1949, 1953, 1958, 1962, 1963, 1964, 1973, 1978, 1981, 1984, 1987, 1990, 1991, 1992, 1994, 1995, 1998, 2001, 2005, 2007, 2010, 2015, 2017, 2021, 2028, 2029, 2035, 2036, 2049, 2059, 2062, 2063, 2064, 2067, 2072, 2076, 2077, 2094, 2105, 2106, 2107, 2109, 2110, 2112, 2118, 2119, 2121, 2126, 2133, 2135, 2139, 2145, 2147, 2161, 2162, 2164, 2169, 2177, 2185, 2186, 2188, 2189, 2193, 2194, 2195, 2197, 2199, 2208, 2212, 2215, 2223, 2226, 2230, 2237, 2240, 2243, 2247, 2249, 2250, 2253, 2254, 2257, 2260, 2261, 2262, 2265, 2274, 2280, 2281, 2285, 2311, 2312, 2313, 2317, 2318, 2319, 2320, 2322, 2324, 2330, 2335, 2336, 2343, 2344, 2348, 2354, 2360, 2364, 2366, 2378, 2388, 2389, 2395, 2400, 2404, 2409, 2410, 2423, 2424, 2425, 2429, 2437, 2440, 2442, 2445, 2446, 2448, 2461, 2463, 2469, 2484, 2488, 2493, 2501, 
2512, 2517, 2522, 2529, 2530, 2532, 2535, 2537, 2538, 2539, 2546, 2547, 2548, 2549, 2553, 2554, 2555, 2556, 2559, 2560, 2562, 2563, 2565, 2574, 2575, 2589, 2590, 2602, 2606, 2607, 2610, 2616, 2624, 2626, 2629, 2632, 2635, 2644, 2660, 2663, 2672, 2676, 2681, 2707, 2710, 2714, 2715, 2716, 2731, 2736, 2742, 2744, 2745, 2749, 2754, 2756, 2757, 2760, 2761, 2762, 2764, 2766, 2767, 2788, 2798, 2803, 2806, 2807, 2812, 2815, 2816, 2820, 2823, 2837, 2841, 2843, 2854, 2857, 2861, 2877, 2882, 2884, 2888, 2899, 2902, 2905, 2912, 2913, 2916, 2921, 2927, 2933, 2937, 2944, 2949, 2953, 2962, 2975, 2977, 2979, 2981, 2990, 2991, 2995, 2998, 2999]}\n","Epoch 4\n","loading env vars from: /home/inflaton/code/projects/courses/logical-reasoning/.env\n","Adding /home/inflaton/code/projects/courses/logical-reasoning to sys.path\n","loading /home/inflaton/code/projects/courses/logical-reasoning/llm_toolkit/logical_reasoning_utils.py\n","internlm/internlm2_5-7b-chat-1m llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-176 False datasets/mgtv results/mgtv-results_h100.csv\n","(1) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. Max memory = 11.994 GB.\n","0.0 GB of memory reserved.\n","loading model: internlm/internlm2_5-7b-chat-1m\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 04:48:32,384 >> loading file ./tokenizer.model from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/./tokenizer.model\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 04:48:32,385 >> loading file added_tokens.json from cache at None\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 04:48:32,385 >> loading file special_tokens_map.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/special_tokens_map.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 04:48:32,385 >> loading file tokenizer_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/tokenizer_config.json\n","[INFO|tokenization_utils_base.py:2161] 2024-07-14 04:48:32,385 >> loading file tokenizer.json from cache at None\n","07/14/2024 04:48:33 - INFO - llamafactory.data.template - Replace eos token: <|im_end|>\n","07/14/2024 04:48:33 - INFO - llamafactory.data.template - Add <|im_start|> to stop words.\n","[INFO|configuration_utils.py:733] 2024-07-14 04:48:34,196 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:733] 2024-07-14 04:48:35,199 >> loading configuration file config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/config.json\n","[INFO|configuration_utils.py:800] 2024-07-14 04:48:35,199 >> Model config InternLM2Config {\n"," \"_name_or_path\": \"internlm/internlm2_5-7b-chat-1m\",\n"," \"architectures\": [\n"," \"InternLM2ForCausalLM\"\n"," ],\n"," \"attn_implementation\": \"eager\",\n"," \"auto_map\": {\n"," \"AutoConfig\": \"internlm/internlm2_5-7b-chat-1m--configuration_internlm2.InternLM2Config\",\n"," \"AutoModel\": \"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\",\n"," \"AutoModelForCausalLM\": 
\"internlm/internlm2_5-7b-chat-1m--modeling_internlm2.InternLM2ForCausalLM\"\n"," },\n"," \"bias\": false,\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": 2,\n"," \"hidden_act\": \"silu\",\n"," \"hidden_size\": 4096,\n"," \"initializer_range\": 0.02,\n"," \"intermediate_size\": 14336,\n"," \"max_position_embeddings\": 262144,\n"," \"model_type\": \"internlm2\",\n"," \"num_attention_heads\": 32,\n"," \"num_hidden_layers\": 32,\n"," \"num_key_value_heads\": 8,\n"," \"pad_token_id\": 2,\n"," \"pretraining_tp\": 1,\n"," \"rms_norm_eps\": 1e-05,\n"," \"rope_scaling\": {\n"," \"factor\": 2.5,\n"," \"type\": \"dynamic\"\n"," },\n"," \"rope_theta\": 50000000,\n"," \"tie_word_embeddings\": false,\n"," \"torch_dtype\": \"bfloat16\",\n"," \"transformers_version\": \"4.42.3\",\n"," \"use_cache\": true,\n"," \"vocab_size\": 92544\n","}\n","\n","07/14/2024 04:48:35 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.\n","[INFO|modeling_utils.py:3556] 2024-07-14 04:48:35,925 >> loading weights file model.safetensors from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/model.safetensors.index.json\n","[INFO|modeling_utils.py:1531] 2024-07-14 04:48:36,322 >> Instantiating InternLM2ForCausalLM model under default dtype torch.bfloat16.\n","[INFO|configuration_utils.py:1000] 2024-07-14 04:48:36,323 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": 2,\n"," \"pad_token_id\": 2\n","}\n","\n","Loading checkpoint shards: 100%|██████████████████| 8/8 [07:15<00:00, 54.50s/it]\n","[INFO|modeling_utils.py:4364] 2024-07-14 04:55:52,484 >> All model checkpoint weights were used when initializing InternLM2ForCausalLM.\n","\n","[INFO|modeling_utils.py:4372] 2024-07-14 04:55:52,484 >> All the weights of InternLM2ForCausalLM were initialized from the model checkpoint at internlm/internlm2_5-7b-chat-1m.\n","If your task is similar to the task the model of the checkpoint was trained on, you can already use InternLM2ForCausalLM for predictions without further training.\n","[INFO|configuration_utils.py:955] 2024-07-14 04:55:52,800 >> loading configuration file generation_config.json from cache at /home/inflaton/.cache/huggingface/hub/models--internlm--internlm2_5-7b-chat-1m/snapshots/8d1a709a04d71440ef3df6ebbe204672f411c8b6/generation_config.json\n","[INFO|configuration_utils.py:1000] 2024-07-14 04:55:52,800 >> Generate config GenerationConfig {\n"," \"bos_token_id\": 1,\n"," \"eos_token_id\": [\n"," 2,\n"," 92542\n"," ],\n"," \"pad_token_id\": 2\n","}\n","\n","WARNING:root:Some parameters are on the meta device device because they were offloaded to the cpu.\n","07/14/2024 04:55:52 - INFO - llamafactory.model.model_utils.attention - Using vanilla attention implementation.\n","INFO:llamafactory.model.model_utils.attention:Using vanilla attention implementation.\n","WARNING:root:Some parameters are on the meta device device because they were offloaded to the cpu.\n","07/14/2024 04:59:34 - INFO - llamafactory.model.adapter - Merged 1 adapter(s).\n","INFO:llamafactory.model.adapter:Merged 1 adapter(s).\n","07/14/2024 04:59:34 - INFO - llamafactory.model.adapter - Loaded adapter(s): llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-176\n","INFO:llamafactory.model.adapter:Loaded adapter(s): llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full/checkpoint-176\n","07/14/2024 04:59:34 - INFO - llamafactory.model.loader - all params: 
7,737,708,544\n","INFO:llamafactory.model.loader:all params: 7,737,708,544\n","(2) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. Max memory = 11.994 GB.\n","14.451 GB of memory reserved.\n","loading train/test data files\n","DatasetDict({\n"," train: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 25000\n"," })\n"," test: Dataset({\n"," features: ['text', 'label', 'answer', 'title', 'puzzle', 'truth', 'train_text', 'prompt'],\n"," num_rows: 3000\n"," })\n","})\n","--------------------------------------------------\n","text: 甄加索是自杀吗\n","--------------------------------------------------\n","label: 不是\n","--------------------------------------------------\n","answer: nan\n","--------------------------------------------------\n","title: 海岸之谜\n","--------------------------------------------------\n","puzzle: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","--------------------------------------------------\n","truth: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","--------------------------------------------------\n","train_text: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","\n","实际情况: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","\n","参与者提出的问题: 甄加索是自杀吗\n","<|im_end|>\n","<|im_start|>assistant\n","不是<|im_end|>\n","--------------------------------------------------\n","prompt: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 
参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在远离城市喧嚣的海边小屋,一天清晨,邻居发现甄加索僵卧在沙滩上,已无生命迹象。现场没有发现任何打斗的迹象。请问甄加索的死因是什么?\n","\n","实际情况: 甄加索是一位热爱自然的画家,他每年都会来到这个海边小屋寻找灵感。在他生命的最后几天,他一直在创作一幅描绘海洋生物的画作。在画即将完成的前一天晚上,他骑着自行车外出,打算在海边观赏夜景。然而,他在沙滩上意外发现了一只搁浅的海豚,为了救助这只海豚,他耗费了极大的体力,最终成功将其送回海中。筋疲力尽的甄加索在沙滩上睡着了,由于他患有严重的心脏病,却未告知旁人,在寒冷的海风中,他的心脏停止了跳动。因此,警方在现场只发现了车轮痕迹和未完成的画作,而没有发现任何他杀的迹象。\n","\n","参与者提出的问题: 甄加索是自杀吗\n","<|im_end|>\n","<|im_start|>assistant\n","\n","--------------------------------------------------\n","text: 死者受伤了吗\n","--------------------------------------------------\n","label: 不是\n","--------------------------------------------------\n","answer: nan\n","--------------------------------------------------\n","title: 甄庄哭声\n","--------------------------------------------------\n","puzzle: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","--------------------------------------------------\n","truth: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","--------------------------------------------------\n","train_text: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","实际情况: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","参与者提出的问题: 死者受伤了吗\n","<|im_end|>\n","<|im_start|>assistant\n","不是<|im_end|>\n","--------------------------------------------------\n","prompt: <|im_start|>system\n","You are an expert in logical reasoning.<|im_end|>\n","<|im_start|>user\n","你是一个逻辑游戏的主持人。游戏规则如下:\n","\n","1. 参与者会得到一个谜题。\n","2. 参与者可以通过提问来获取线索,尝试解开谜题。\n","3. 对于每个问题,主持人将根据实际情况回答以下五个选项之一:是、不是、不重要、回答正确、问法错误。\n","4. 回答中不能添加任何其它信息,也不能省略选项中的任何一个字。例如,不可以把“不是”省略成“不”。\n","5. 参与者需要根据回答来推理,并最终找出谜题的正确答案。\n","\n","请严格按照这些规则回答参与者提出的问题。\n","\n","谜题: 在一个安静的夜晚,小村庄的湖边突然传来了阵阵哭泣声。第二天早晨,村长甄锐发现湖边的石头上放着一顶破旧的帽子,但没有人知道这顶帽子是从哪里来的,哭泣声又是为何。请还原故事真相。\n","\n","实际情况: 原来,这顶破旧的帽子属于一个小男孩,他小时候与爷爷在湖边生活。爷爷教他钓鱼、游泳,还告诉他湖中的海龟是他们的朋友。后来,小男孩随父母去了城市生活,但每年夏天都会回到村子探望爷爷。然而,去年夏天,爷爷因病去世,小男孩伤心欲绝。今年夏天,他回到村子,来到湖边,想起和爷爷的美好回忆,忍不住哭泣。他将爷爷的帽子放在湖边的石头上,希望能让爷爷的在天之灵得到安慰。那晚的哭泣声正是小男孩在祭莫他亲爱的爷爷。\n","\n","参与者提出的问题: 死者受伤了吗\n","<|im_end|>\n","<|im_start|>assistant\n","\n","Evaluating model: internlm/internlm2_5-7b-chat-1m\n","  0%|          | 0/3000 [00:00<?, ?it/s]--------\n","step 1: 不是<|im_end|>\n","--------\n","step 2: 不是\n","--------\n","step 3: 不是\n","--------\n","step 4: 不是\n","--------\n","step 5: 不是\n","100%|█████████████████████████████████████| 3000/3000 [3:34:49<00:00, 4.30s/it]\n","(3) GPU = NVIDIA GeForce RTX 4080 Laptop GPU. Max memory = 11.994 GB.\n","14.451 GB of memory reserved.\n"," text ... internlm/internlm2_5-7b-chat-1m_checkpoint-176\n","0 甄加索是自杀吗 ... 
不是\n","\n","[1 rows x 9 columns]\n","{'accuracy': 0.719, 'incorrect_ids': [9, 11, 12, 24, 26, 29, 31, 33, 34, 36, 38, 55, 58, 59, 61, 65, 66, 67, 78, 82, 83, 84, 88, 93, 94, 96, 97, 104, 106, 112, 117, 119, 120, 121, 123, 128, 129, 131, 135, 137, 138, 139, 149, 150, 153, 163, 164, 171, 173, 190, 192, 199, 200, 207, 218, 222, 224, 225, 226, 228, 234, 236, 245, 248, 250, 251, 253, 259, 260, 265, 269, 271, 275, 276, 284, 286, 289, 299, 301, 303, 304, 309, 311, 316, 317, 318, 321, 323, 330, 334, 335, 337, 339, 341, 342, 346, 347, 350, 351, 352, 353, 354, 355, 356, 357, 360, 368, 371, 373, 374, 377, 383, 386, 389, 391, 395, 397, 410, 416, 421, 428, 429, 430, 432, 438, 440, 447, 452, 454, 456, 457, 458, 471, 472, 476, 479, 480, 492, 493, 494, 495, 497, 500, 501, 502, 504, 506, 507, 508, 510, 511, 514, 515, 517, 518, 520, 528, 530, 536, 537, 540, 553, 560, 561, 566, 570, 571, 579, 581, 585, 589, 591, 592, 593, 596, 597, 599, 601, 609, 610, 612, 613, 614, 621, 625, 626, 628, 632, 636, 638, 643, 644, 647, 649, 650, 657, 663, 665, 666, 671, 678, 680, 682, 684, 686, 689, 692, 695, 701, 702, 705, 707, 708, 709, 711, 716, 720, 721, 722, 725, 727, 729, 730, 732, 734, 739, 740, 743, 754, 758, 760, 766, 769, 770, 771, 772, 774, 778, 779, 785, 786, 790, 791, 792, 794, 797, 798, 800, 801, 802, 803, 805, 808, 809, 810, 814, 817, 818, 819, 820, 821, 822, 823, 824, 833, 837, 840, 842, 844, 847, 851, 857, 859, 861, 862, 864, 866, 868, 869, 870, 872, 875, 876, 884, 888, 889, 890, 899, 901, 904, 909, 913, 927, 935, 937, 943, 945, 952, 962, 964, 966, 969, 970, 971, 980, 982, 991, 993, 994, 998, 1003, 1005, 1006, 1007, 1011, 1012, 1015, 1019, 1022, 1025, 1031, 1036, 1040, 1045, 1046, 1051, 1053, 1055, 1061, 1066, 1069, 1071, 1075, 1076, 1080, 1082, 1083, 1085, 1087, 1111, 1114, 1116, 1120, 1125, 1126, 1129, 1135, 1138, 1154, 1161, 1163, 1164, 1166, 1167, 1170, 1173, 1174, 1177, 1178, 1180, 1181, 1183, 1185, 1202, 1203, 1212, 1228, 1232, 1236, 1239, 1240, 1241, 1242, 1249, 1251, 1252, 1254, 1256, 1259, 1266, 1282, 1289, 1296, 1299, 1300, 1301, 1305, 1307, 1308, 1311, 1313, 1314, 1317, 1323, 1324, 1332, 1337, 1339, 1345, 1347, 1349, 1353, 1357, 1363, 1364, 1366, 1367, 1369, 1380, 1385, 1386, 1387, 1388, 1389, 1390, 1392, 1393, 1395, 1400, 1406, 1407, 1412, 1413, 1417, 1420, 1422, 1425, 1426, 1427, 1428, 1430, 1438, 1440, 1443, 1444, 1445, 1448, 1450, 1451, 1453, 1454, 1455, 1457, 1459, 1462, 1469, 1470, 1476, 1478, 1481, 1486, 1490, 1493, 1494, 1496, 1500, 1501, 1512, 1517, 1518, 1525, 1526, 1528, 1533, 1544, 1547, 1548, 1554, 1559, 1560, 1562, 1572, 1573, 1581, 1585, 1586, 1587, 1590, 1593, 1596, 1603, 1604, 1605, 1611, 1622, 1627, 1632, 1633, 1637, 1638, 1641, 1643, 1645, 1647, 1648, 1650, 1654, 1655, 1658, 1659, 1660, 1668, 1669, 1672, 1673, 1674, 1676, 1679, 1683, 1686, 1690, 1695, 1700, 1701, 1712, 1716, 1726, 1727, 1751, 1755, 1756, 1770, 1773, 1780, 1784, 1786, 1791, 1796, 1799, 1804, 1809, 1812, 1816, 1825, 1827, 1831, 1833, 1835, 1848, 1849, 1851, 1854, 1858, 1860, 1867, 1869, 1872, 1879, 1881, 1888, 1897, 1905, 1909, 1914, 1915, 1924, 1933, 1935, 1944, 1949, 1953, 1958, 1962, 1963, 1964, 1973, 1978, 1981, 1984, 1987, 1990, 1991, 1992, 1994, 1995, 1998, 2001, 2002, 2005, 2007, 2008, 2010, 2014, 2015, 2017, 2021, 2025, 2035, 2036, 2037, 2038, 2046, 2062, 2064, 2067, 2072, 2076, 2077, 2092, 2094, 2101, 2105, 2106, 2107, 2109, 2112, 2118, 2119, 2121, 2126, 2130, 2133, 2135, 2139, 2140, 2141, 2145, 2147, 2161, 2162, 2164, 2167, 2169, 2177, 2185, 2186, 2189, 2193, 2194, 2195, 2197, 2199, 2208, 2210, 2212, 2215, 
2223, 2226, 2230, 2237, 2240, 2243, 2246, 2247, 2249, 2250, 2253, 2254, 2257, 2261, 2262, 2265, 2274, 2280, 2281, 2285, 2297, 2309, 2311, 2312, 2313, 2317, 2318, 2319, 2320, 2322, 2324, 2326, 2330, 2335, 2336, 2343, 2344, 2348, 2354, 2360, 2364, 2366, 2376, 2388, 2395, 2400, 2404, 2406, 2409, 2410, 2412, 2423, 2425, 2429, 2437, 2440, 2441, 2442, 2444, 2445, 2446, 2448, 2461, 2463, 2469, 2474, 2484, 2488, 2491, 2493, 2499, 2501, 2512, 2517, 2522, 2526, 2529, 2530, 2532, 2535, 2537, 2538, 2539, 2546, 2547, 2548, 2549, 2553, 2554, 2555, 2556, 2558, 2559, 2560, 2562, 2563, 2565, 2567, 2569, 2574, 2575, 2581, 2589, 2590, 2594, 2599, 2600, 2602, 2604, 2606, 2607, 2610, 2613, 2616, 2624, 2626, 2629, 2632, 2655, 2660, 2663, 2670, 2671, 2672, 2676, 2678, 2680, 2681, 2684, 2704, 2707, 2710, 2714, 2715, 2728, 2731, 2736, 2742, 2744, 2748, 2749, 2754, 2756, 2757, 2760, 2761, 2762, 2764, 2766, 2767, 2770, 2788, 2798, 2801, 2803, 2805, 2806, 2807, 2812, 2815, 2816, 2820, 2823, 2837, 2841, 2843, 2845, 2850, 2852, 2854, 2856, 2857, 2867, 2877, 2882, 2884, 2888, 2899, 2902, 2903, 2905, 2906, 2912, 2913, 2916, 2921, 2927, 2931, 2933, 2938, 2939, 2944, 2949, 2950, 2953, 2955, 2962, 2973, 2975, 2979, 2981, 2983, 2990, 2991, 2995, 2998, 2999]}\n","CPU times: user 15min 6s, sys: 4min 44s, total: 19min 51s\n","Wall time: 16h 52min 33s\n"]}],"source":["%%time\n","\n","# Evaluate the epoch 1-4 LoRA checkpoints saved under the given directory against the full test set:\n","# num_of_entries=-1 means no row limit (all 3,000 test entries), and load_in_4bit=False keeps the bf16 weights.\n","evaluate_model_all_epochs(\"internlm/internlm2_5-7b-chat-1m\", \"llama-factory/saves/internlm2_5_7b/lora/sft_bf16_p1_full\", 4, start_epoch=1, load_in_4bit=False, num_of_entries=-1)"]}],"metadata":{"accelerator":"GPU","application/vnd.databricks.v1+notebook":{"dashboards":[],"environmentMetadata":null,"language":"python","notebookMetadata":{"mostRecentlyExecutedCommandWithImplicitDF":{"commandId":-1,"dataframes":["_sqldf"]},"pythonIndentUnit":4},"notebookName":"10_eval-lf-medium-py3.11","widgets":{}},"colab":{"gpuType":"L4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.9"}},"nbformat":4,"nbformat_minor":0}