{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Defaulting to user installation because normal site-packages is not writeable\n", "Requirement already satisfied: pandas in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (2.2.0)\n", "Requirement already satisfied: tzdata>=2022.7 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n", "Requirement already satisfied: numpy<2,>=1.22.4 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (1.26.1)\n", "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2.8.2)\n", "Requirement already satisfied: pytz>=2020.1 in /Users/picocreator/Library/Python/3.9/lib/python/site-packages (from pandas) (2024.1)\n", "Requirement already satisfied: six>=1.5 in /Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/site-packages (from python-dateutil>=2.8.2->pandas) (1.15.0)\n", "\u001b[33mWARNING: You are using pip version 21.2.4; however, version 24.2 is available.\n", "You should consider upgrading via the '/Library/Developer/CommandLineTools/usr/bin/python3 -m pip install --upgrade pip' command.\u001b[0m\n" ] } ], "source": [ "!pip3 install pandas" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Get the filelist\n", "\n", "For the full results.json" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 6263 results.json files\n" ] } ], "source": [ "import glob\n", "\n", "# Specify the path to the folder containing the results.json files\n", "folder_path = \"lm-eval-output\"\n", "\n", "# Use glob to find all the results.json files\n", "results_json_files = glob.glob(f\"{folder_path}/**/results.json\", recursive=True)\n", "\n", "# Show total number of results.json files found\n", "print(f\"Found {len(results_json_files)} results.json files\")\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Process all the results.json\n", "\n", "One file at a time" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Processed example: {'name': 'mistralai/Mistral-7B-Instruct-v0.2', 'config': {'dtype=bfloat16,trust_remote_code=True': {'confStr': 'dtype=bfloat16,trust_remote_code=True', 'confObj': {'dtype': 'bfloat16', 'trust_remote_code': 'True'}, 'results': {'cola': {'mcc,none': 0.12965656914783247, 'mcc_stderr,none': 0.032433640730190394, 'alias': 'cola'}}, 'groups': {}}}}\n" ] } ], "source": [ "import json\n", "\n", "# Global result map if it's not set\n", "if 'global_result_map' not in globals():\n", " global_result_map = {}\n", "\n", "#\n", "# Function to process the results.json file\n", "#\n", "def process_results_json(file_path):\n", " with open(file_path) as f:\n", " data = json.load(f)\n", "\n", " # Model args, presplit by ','\n", " model_args = data['config']['model_args'].split(',')\n", "\n", " # Extract the pretrained value from config.model_args\n", " modelname = model_args[0].split('=')[1]\n", "\n", " # Opt array\n", " confArgsArr = model_args[1:]\n", "\n", " # Sort the opt array\n", " confArgsArr.sort()\n", " # Convert it to a string\n", " confStr = ','.join(confArgsArr)\n", "\n", " # Convert the option array of key=value strings to a dictionary\n", " confObj = { 
}\n", " for o in confArgsArr:\n", " k, v = o.split('=')\n", " confObj[k] = v\n", " \n", " # Create a dictionary to store the results, or use the existing one if it exists\n", " if modelname in global_result_map:\n", " modelObj = global_result_map[modelname]\n", " else:\n", " modelObj = {\n", " 'name': modelname,\n", " 'config': { }\n", " }\n", " \n", " # Get the opt object for the model\n", " if confStr in modelObj['config']:\n", " confSet = modelObj['config'][confStr]\n", " else:\n", " confSet = {\n", " 'confStr': confStr,\n", " 'confObj': confObj,\n", " 'results': {},\n", " 'groups': {}\n", " }\n", "\n", " # Iterate over the results and extract the result object for each test/group\n", " if 'results' in data:\n", " for test, result in data['results'].items():\n", " confSet['results'][test] = result\n", " if 'groups' in data:\n", " for test, result in data['groups'].items():\n", " confSet['groups'][test] = result\n", " \n", " # Update the global result map object\n", " modelObj['config'][confStr] = confSet\n", " global_result_map[modelname] = modelObj\n", " return modelObj\n", "\n", "# Lets test the function with the first results.json file\n", "first_result = process_results_json(results_json_files[0])\n", "print(f\"Processed example: \", first_result)\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 134 models\n", "Models: \n", "['mistralai/Mistral-7B-Instruct-v0.2', 'mistralai/Mistral-7B-v0.1', 'mosaicml/mpt-7b-instruct', 'mosaicml/mpt-7b', 'mosaicml/mpt-7b-chat', 'bigscience/bloom-7b1', 'bigscience/bloomz-7b1-mt', 'bigscience/bloomz-7b1', 'EleutherAI/pythia-2.8b', 'EleutherAI/pythia-1.4b', 'EleutherAI/gpt-j-6b', 'EleutherAI/pythia-6.9b', 'google/flan-t5-base', 'google/gemma-2b', 'google/gemma-2b-it', 'google/gemma-7b', 'google/gemma-7b-it', 'google/flan-t5-large', 'microsoft/phi-1_5', 'microsoft/phi-2', 'microsoft/phi-1', 'allenai/OLMo-7B', 'TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T', 'TinyLlama/TinyLlama-1.1B-Chat-v1.0', 'RWKV/rwkv-5-world-1b5', 'RWKV/rwkv-5-world-3b', 'RWKV/rwkv-4-world-3b', 'RWKV/v5-EagleX-v2-7B-HF', 'RWKV/rwkv-6-world-1b6', 'RWKV/rwkv-4-world-1b5', 'RWKV/v5-Eagle-7B-HF', 'RWKV/v6-Finch-7B-HF', 'RWKV/rwkv-6-world-3b-v2.1', 'RWKV/rwkv-4-world-7b', 'RWKV/v6-Finch-14B-HF', 'RWKV/rwkv-raven-7b', 'RWKV/rwkv-6-world-3b', 'aisingapore/sealion7b', 'aisingapore/sealion3b', './rwkv-x-dev/1_3-C5-rwkv-270_pth', './rwkv-x-dev/225-EagleX-PreFT-C', './rwkv-x-dev/225-EagleX-PreFT-D', './rwkv-x-dev/1_0_pth', './rwkv-x-dev/chunk4-0_85_pth', './rwkv-x-dev/1_3-C1-rwkv-340_pth', './rwkv-x-dev/chunk1-0_8_pth', './rwkv-x-dev/chunk0-0_8_pth', './rwkv-x-dev/225-EagleX-PreFT-E', './rwkv-x-dev/225-EagleX-PreFT-B', './rwkv-x-dev/blink4-final_pth', './rwkv-x-dev/chunk2-0_8_pth', './rwkv-x-dev/chunk3-0_8_pth', './rwkv-x-dev/r3-4k-test2-fix3-blink-final_pth', './rwkv-x-dev/R4-7B-15t-With-Mask_pth', './rwkv-x-dev/r3-testchunk-1-8_pth', './rwkv-x-dev/R4-with-shuffle-rwkv-53_pth', './rwkv-x-dev/chunk7-2-0_85_pth', './rwkv-x-dev/EagleX-1_7T_pth', './rwkv-x-dev/r3-testchunk2-blink-fixed_pth', './rwkv-x-dev/r3-testchunk2-blink_pth', './rwkv-x-dev/rwkv-230_pth', './rwkv-x-dev/1_3-C0-rwkv-60_pth', './rwkv-x-dev/chunk5-0_85_pth', './rwkv-x-dev/R4-7B-Base-No-Mask_pth', './rwkv-x-dev/RWKV-5-World-1B5-v2-20231025-ctx4096', './rwkv-x-dev/R4-1B5-No-Mask_pth', './rwkv-x-dev/RWKV-32K-5B-RW_pth', './rwkv-x-dev/R4-7B-15t-32k-No-Mask_pth', './rwkv-x-dev/1_3-C0-PRERUN-rwkv-60_pth', 
'./rwkv-x-dev/EagleX_1-7T_Chat_pth', './rwkv-x-dev/1_3-C1-rwkv-390_pth', './rwkv-x-dev/1_3-C1-rwkv-20_pth', './rwkv-x-dev/chunk8-1-0_85_pth', './rwkv-x-dev/R4-7B-Base-32k-No-Mask_pth', './rwkv-x-dev/R4-no-shuffle-rwkv-53_pth', './rwkv-x-dev/1_3-C2-rwkv-648_pth', './rwkv-x-dev/1_3-C2-rwkv-250_pth', './rwkv-x-dev/r3-testchunk-1-8-no-cuda-with-warmup_pth', './rwkv-x-dev/1_3-C0-rwkv-140_pth', './rwkv-x-dev/bruber_9b', './rwkv-x-dev/Eagle-225-1FT', './rwkv-x-dev/225-EagleX-PreFT-A', './rwkv-x-dev/225-EagleX-PreFT-F', './rwkv-x-dev/r3-c1-8_pth', './rwkv-x-dev/1_3-C0-PRERUN-rwkv-450_pth', './rwkv-x-dev/RWKV-5-World-3B-v2-20231118-ctx16k', './rwkv-x-dev/1_3-C0-PREPRERUN-rwkv-40_pth', './rwkv-x-dev/RWKV-5-World-7B-v2-20240128-ctx4096', './rwkv-x-dev/R4-7B-15t-No-Mask_pth', './rwkv-x-dev/1_0-c1-290_pth', './rwkv-x-dev/R4-1B5-With-Mask_pth', './rwkv-x-dev/Quetzal-N8-1', './rwkv-x-dev/1_3-C0-PREPRERUN-rwkv-30_pth', './rwkv-x-dev/1_3-C0-rwkv-70_pth', './rwkv-x-dev/chunk6-0_85_pth', './rwkv-x-dev/R4-7B-Base-With-Mask_pth', 'rwkv-x-dev/v5-Eagle-7B-1_0T-HF', './rwkv-x-dev/1_3-C0-PRERUN-rwkv-30_pth', './rwkv-x-dev/chunk7-1-0_85_pth', './rwkv-x-dev/1_3-C1-rwkv-190_pth', './rwkv-x-dev/R4-7B-15t-extd-e3_pth', './rwkv-x-dev/r3-testchunk2_pth', './rwkv-x-dev/Hermes-RWKV-v5-7B_pth', './rwkv-x-dev/1_3-C0-rwkv-153_pth', './rwkv-x-dev/R4-7B-15t-extd-e2_pth', './rwkv-x-dev/r3-testchunk-blink_pth', 'SmerkyG/rwkv-5-world-1b5', 'SmerkyG/rwkv6-world-1b6', 'SmerkyG/rwkv6-world-3b', 'SmerkyG/rwkv-5-world-3b', 'SmerkyG/rwkv-5-world-7b', 'SmerkyG/rwkv5-world-7b', 'togethercomputer/RedPajama-INCITE-7B-Base', 'togethercomputer/RedPajama-INCITE-7B-Instruct', 'togethercomputer/RedPajama-INCITE-7B-Chat', 'facebook/opt-2.7b', 'facebook/opt-6.7b', 'facebook/opt-1.3b', 'tiiuae/falcon-7b-instruct', 'tiiuae/falcon-rw-1b', 'tiiuae/falcon-rw-7b', 'tiiuae/falcon-7b', 'm8than/Finch-14B-Final2', 'm8than/Finch-14B-Continued', 'm8than/Finch-14B-Final', 'm8than/mistral-7b-instruct-0.2', 'm8than/Finch-14B-Continued-10', 'm8than/FinchX-Med', 'TimeMobius/Mobius-RWKV-Chat-12B-128k-v4-HF', 'huggyllama/llama-7b', 'meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-7b-hf', 'state-spaces/mamba-2.8b-hf', 'state-spaces/mamba-1.4b-hf']\n", "Saved to compiled-lm-eval-results.json\n" ] } ], "source": [ "# Lets reset and reprocess all the results.json files\n", "global_result_map = {}\n", "\n", "# Process all the results.json files\n", "for file in results_json_files:\n", " process_results_json(file)\n", "\n", "# Show high level list of models\n", "print(f\"Found {len(global_result_map)} models\")\n", "print(f\"Models: \\n{list(global_result_map.keys())}\")\n", "\n", "# Save the result map to a file\n", "with open('summary/compiled-lm-eval-results.json', 'w') as f:\n", " json.dump(global_result_map, f, sort_keys=True, indent='\\t')\n", "\n", "# Echo that its been saved to json\n", "print(f\"Saved to compiled-lm-eval-results.json\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Convert the results into CSV table formats" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/var/folders/71/91hw6kks41v97jkjp028xxbr0000gp/T/ipykernel_39274/788387412.py:2: DeprecationWarning: \n", "Pyarrow will become a required dependency of pandas in the next major release of pandas (pandas 3.0),\n", "(to allow more performant data types, such as the Arrow string type, and better interoperability with other libraries)\n", "but was not found to be installed on your 
system.\n", "If this would cause problems for you,\n", "please provide us feedback at https://github.com/pandas-dev/pandas/issues/54466\n", " \n", " import pandas as pd\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
modelavg_accavg_acc_stderrxcopa (acc)xcopa (acc_stderr)
0mistralai/Mistral-7B-Instruct-v0.20.0000000.000000NaNNaN
1mistralai/Mistral-7B-v0.10.5594550.0538790.5594550.053879
2mosaicml/mpt-7b-instruct0.5370910.0419190.5370910.041919
3mosaicml/mpt-7b0.5360000.0423390.5360000.042339
4mosaicml/mpt-7b-chat0.5380000.0470590.5380000.047059
..................
60huggyllama/llama-7b0.5418180.0407180.5418180.040718
61meta-llama/Llama-2-7b-chat-hf0.5598180.0549540.5598180.054954
62meta-llama/Llama-2-7b-hf0.5667270.0525150.5667270.052515
63state-spaces/mamba-2.8b-hf0.5529090.0355700.5529090.035570
64state-spaces/mamba-1.4b-hf0.5441820.0313900.5441820.031390
\n", "

65 rows × 5 columns

\n", "
" ], "text/plain": [ " model avg_acc avg_acc_stderr xcopa (acc) \\\n", "0 mistralai/Mistral-7B-Instruct-v0.2 0.000000 0.000000 NaN \n", "1 mistralai/Mistral-7B-v0.1 0.559455 0.053879 0.559455 \n", "2 mosaicml/mpt-7b-instruct 0.537091 0.041919 0.537091 \n", "3 mosaicml/mpt-7b 0.536000 0.042339 0.536000 \n", "4 mosaicml/mpt-7b-chat 0.538000 0.047059 0.538000 \n", ".. ... ... ... ... \n", "60 huggyllama/llama-7b 0.541818 0.040718 0.541818 \n", "61 meta-llama/Llama-2-7b-chat-hf 0.559818 0.054954 0.559818 \n", "62 meta-llama/Llama-2-7b-hf 0.566727 0.052515 0.566727 \n", "63 state-spaces/mamba-2.8b-hf 0.552909 0.035570 0.552909 \n", "64 state-spaces/mamba-1.4b-hf 0.544182 0.031390 0.544182 \n", "\n", " xcopa (acc_stderr) \n", "0 NaN \n", "1 0.053879 \n", "2 0.041919 \n", "3 0.042339 \n", "4 0.047059 \n", ".. ... \n", "60 0.040718 \n", "61 0.054954 \n", "62 0.052515 \n", "63 0.035570 \n", "64 0.031390 \n", "\n", "[65 rows x 5 columns]" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Lets convert this into a table, which we will display in this notebook, and save as a CSV\n", "import pandas as pd\n", "\n", "##################################################\n", "#\n", "# Utility functions\n", "#\n", "##################################################\n", "\n", "# Check if the given name string, is within the list, including \"*\" wildcard\n", "def is_in_list(name, list):\n", " for n in list:\n", " if n[-1] == '*':\n", " if name.startswith(n[:-1]):\n", " return True\n", " elif n == name:\n", " return True\n", " return False\n", "\n", "# Is in inclusion exclusion list pair\n", "def is_in_list_pair(name, inList, exList):\n", " if not is_in_list(name, inList):\n", " return False\n", " if is_in_list(name, exList):\n", " return False\n", " return True\n", "\n", "# Prepare a single test/group result object\n", "# By applying common filtering and formatting changes\n", "def prepare_test_result(result):\n", " # The reutrn object\n", " ret = {}\n", " # Iterate the result key/value\n", " for k, v in result.items():\n", " # Skip if its alias\n", " if k == 'alias':\n", " continue\n", "\n", " # If the key ends with \",none\", drop the \",none\"\n", " if k.endswith(',none'):\n", " k = k[:-5]\n", " \n", " # Save the result\n", " ret[k] = v\n", " \n", " # Return the result\n", " return ret\n", "\n", "##################################################\n", "#\n", "# Generate the result\n", "#\n", "##################################################\n", "\n", "# Create a list of rows for the table\n", "def generate_result_table(\n", " inConfig = { \"dtype\": \"bfloat16\" },\n", "\n", " # Models to include/exclude\n", " inModels = [\"*\"],\n", " exModels = [\"./rwkv-x-dev/*\", \"rwkv-x-dev\"],\n", "\n", " # Results and groups to include\n", " inResults = [],\n", " inGroups = [\"*\"],\n", "\n", " # Exclude results and groups, applied after inResults and inGroups\n", " exResults = [],\n", " exGroups = [],\n", "\n", " # Sorted\n", " sort = False,\n", " simplified = False\n", "):\n", " table_rows = []\n", "\n", " # Iterate over the models\n", " for model, modelObj in global_result_map.items():\n", " # Skip if not in the inModels or exModels\n", " if not is_in_list_pair(model, inModels, exModels):\n", " continue\n", "\n", " # Iterate over the configurations\n", " for confStr, confSet in modelObj['config'].items():\n", " # Get the confObj\n", " confObj = confSet['confObj']\n", "\n", " # Check if the inConfig, matches the confObj\n", " if inConfig:\n", " skip = False\n", " for k, 
v in inConfig.items():\n", " if k not in confObj or confObj[k] != v:\n", " skip = True\n", " break\n", " if skip:\n", " continue\n", "\n", " # Create a row object\n", " row = {\n", " 'model': model,\n", " # 'config': confStr\n", "\n", " \"avg_acc\": 0.0,\n", " \"avg_acc_stderr\": 0.0,\n", " }\n", "\n", " # Total acc / acc_stderr\n", " acc_total = 0.0\n", " acc_count = 0\n", " acc_stderr_total = 0.0\n", " acc_stderr_count = 0\n", "\n", " # Add the groups\n", " for test, result in confSet['groups'].items():\n", "\n", " # Skip if not in the inGroups or exGroups\n", " if not is_in_list_pair(test, inGroups, exGroups):\n", " continue\n", "\n", " # Filter the result obj\n", " cleanResult = prepare_test_result(result)\n", "\n", " # Add the result to the row, as seperate columns for each key\n", " for k, v in cleanResult.items():\n", " if k == 'acc':\n", " acc_total += v\n", " acc_count += 1\n", " elif k == 'acc_stderr':\n", " acc_stderr_total += v\n", " acc_stderr_count += 1\n", " \n", " # For simplified, we only use acc and perplexity\n", " if simplified and k not in ['acc', 'perplexity']:\n", " continue\n", "\n", " # Save the value\n", " row[f\"{test} ({k})\"] = v\n", "\n", " # Add the results\n", " for test, result in confSet['results'].items():\n", "\n", " # Skip if not in the inResults or exResults\n", " if not is_in_list_pair(test, inResults, exResults):\n", " continue\n", "\n", " # Filter the result obj\n", " cleanResult = prepare_test_result(result)\n", "\n", " # Add the result to the row, as seperate columns for each key\n", " for k, v in cleanResult.items():\n", " if k == 'acc':\n", " acc_total += v\n", " acc_count += 1\n", " elif k == 'acc_stderr':\n", " acc_stderr_total += v\n", " acc_stderr_count += 1\n", "\n", " # For simplified, we only use acc and perplexity\n", " if simplified and k not in ['acc', 'perplexity']:\n", " continue\n", "\n", " # Save the value\n", " row[f\"{test} ({k})\"] = v\n", " \n", " # Add the avg acc and acc_stderr\n", " if acc_count > 0:\n", " row[\"avg_acc\"] = acc_total / acc_count\n", " if acc_stderr_count > 0:\n", " row[\"avg_acc_stderr\"] = acc_stderr_total / acc_stderr_count\n", "\n", " # Append the row to the table\n", " table_rows.append(row)\n", "\n", " # Create a dataframe from the table rows\n", " df = pd.DataFrame(table_rows)\n", "\n", " # Sort by avg_acc\n", " if sort:\n", " df = df.sort_values(by='avg_acc', ascending=False)\n", "\n", " # Show the dataframe\n", " return df\n", "\n", "# Generate the dataframe\n", "df = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"xcopa\"], inResults=[] )\n", "\n", "# # Save the dataframe to a CSV file\n", "# df.to_csv('summary/compiled-lm-eval-results.csv', index=False)\n", "\n", "# Show results\n", "df\n" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "total 38760\n", "-rw-r--r--@ 1 picocreator staff 1.4M Aug 12 16:40 bf16-all-results-and-groups.csv\n", "-rw-r--r--@ 1 picocreator staff 373K Aug 12 16:40 bf16-all-simplified-results-and-groups.csv\n", "-rw-r--r--@ 1 picocreator staff 373K Aug 12 16:40 bf16-all-sorted-results-and-groups.csv\n", "-rw-r--r--@ 1 picocreator staff 99K Aug 12 16:40 bf16-eng-focus.csv\n", "-rw-r--r--@ 1 picocreator staff 1.3M Aug 12 16:40 bf16-eng-results.csv\n", "-rw-r--r--@ 1 picocreator staff 113K Aug 12 16:40 bf16-eng-summary.csv\n", "-rw-r--r--@ 1 picocreator staff 142K Aug 12 16:40 bf16-multilang-results.csv\n", "-rw-r--r--@ 1 picocreator staff 20K Aug 12 16:40 
bf16-multilang-summary.csv\n", "-rw-r--r--@ 1 picocreator staff 99K Aug 12 16:40 bf16-sorted-eng-focus.csv\n", "-rw-r--r--@ 1 picocreator staff 1.3M Aug 12 16:40 bf16-sorted-eng-results.csv\n", "-rw-r--r--@ 1 picocreator staff 113K Aug 12 16:40 bf16-sorted-eng-summary.csv\n", "-rw-r--r--@ 1 picocreator staff 20K Aug 12 16:40 bf16-sorted-multilang-summary.csv\n", "-rw-r--r-- 1 picocreator staff 11M Aug 12 16:40 compiled-lm-eval-results.json\n", "-rw-r--r--@ 1 picocreator staff 184K Jul 31 15:25 rwkv-x-dev-bf16-sorted-eng-180.csv\n", "-rw-r--r--@ 1 picocreator staff 33K Jul 31 15:25 rwkv-x-dev-bf16-sorted-eng-21-focus.csv\n", "-rw-r--r--@ 1 picocreator staff 129K Aug 12 16:40 rwkv-x-dev-bf16-sorted-eng-all.csv\n", "-rw-r--r--@ 1 picocreator staff 8.4K Aug 12 16:40 rwkv-x-dev-bf16-sorted-eng-focus.csv\n", "-rw-r--r--@ 1 picocreator staff 6.9K Aug 12 16:40 rwkv-x-dev-bf16-sorted-multilang-summary.csv\n" ] } ], "source": [ "##################################################\n", "#\n", "# Build the various subsets\n", "#\n", "##################################################\n", "\n", "FOCUS_MODEL_LIST=[\n", " # \"./rwkv-x-dev/*\", \n", " \"rwkv-x-dev/*\", \"RWKV/*\", \"meta-llama/Llama-2-7b*\", \"mistralai/Mistral-7B-v0.1\", \"m8than/*\"\n", "]\n", "\n", "# Overall results\n", "all_results = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"] )\n", "all_results.to_csv('summary/bf16-all-results-and-groups.csv', index=False)\n", "\n", "all_results = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], simplified=True )\n", "all_results.to_csv('summary/bf16-all-simplified-results-and-groups.csv', index=False)\n", "\n", "all_results = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], simplified=True, sort=True )\n", "all_results.to_csv('summary/bf16-all-sorted-results-and-groups.csv', index=False)\n", "\n", "# Multilang results\n", "multiLang_tTest = [\"xcopa_*\", \"xnli_*\", \"xstorycloze_*\", \"xwinograd_*\", \"lambada_openai_*\", \"pawsx_*\"]\n", "multiLang_tGrps = [\"xcopa\", \"xnli\", \"xstorycloze\", \"xwinograd\", \"lambada_multilingual\", \"pawsx\"]\n", "# Both test and groups, merged into a single list\n", "multiLang_joint = multiLang_tTest + multiLang_tGrps\n", "\n", "multilang_grp = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=[] )\n", "multilang_test = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=multiLang_tTest )\n", "multilang_grp.to_csv('summary/bf16-multilang-summary.csv', index=False)\n", "multilang_test.to_csv('summary/bf16-multilang-results.csv', index=False)\n", "\n", "multilang_grp_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=[], sort=True )\n", "multilang_grp_sorted.to_csv('summary/bf16-sorted-multilang-summary.csv', index=False)\n", "\n", "# RWKV perf tracking\n", "rwkv_multilang_grp_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=multiLang_tGrps, inResults=[], exModels=[], inModels=FOCUS_MODEL_LIST, sort=True )\n", "rwkv_multilang_grp_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-multilang-summary.csv', index=False)\n", "\n", "# All other results\n", "eng_grp = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[], exGroups=multiLang_joint, exResults=multiLang_joint )\n", "eng_grp_sorted = 
generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[], exGroups=multiLang_joint, exResults=multiLang_joint, sort=True )\n", "eng_test = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], exGroups=multiLang_joint, exResults=multiLang_joint )\n", "eng_test_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], exGroups=multiLang_joint, exResults=multiLang_joint, sort=True )\n", "\n", "eng_grp.to_csv('summary/bf16-eng-summary.csv', index=False)\n", "eng_test.to_csv('summary/bf16-eng-results.csv', index=False)\n", "eng_test_sorted.to_csv('summary/bf16-sorted-eng-results.csv', index=False)\n", "eng_grp_sorted.to_csv('summary/bf16-sorted-eng-summary.csv', index=False)\n", "\n", "# English focused subset\n", "eng_focus_mixed=[\"lambada_openai\", \"lambada_standard\", \"blimp\", \"piqa\", \"copa\", \"sciq\", \"truthfulqa\", \"pythia\"] #\"np_open\", \"cmmlu\", \"record\"\n", "eng_focus_tGrps=[\"anli\", \"glue\", \"mmlu\" ]\n", "eng_focus_tTest=[\"blimp\", \"arc_*\", \"logiqa\", \"winogrande\", \"openbookqa\", \"hellaswag\"]\n", "\n", "eng_focus_tGrps = eng_focus_tGrps + eng_focus_mixed\n", "eng_focus_tTest = eng_focus_tTest + eng_focus_mixed\n", "\n", "eng_focus = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=eng_focus_tGrps, inResults=eng_focus_tTest )\n", "eng_focus_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=eng_focus_tGrps, inResults=eng_focus_tTest, sort=True )\n", "eng_focus.to_csv('summary/bf16-eng-focus.csv', index=False)\n", "eng_focus_sorted.to_csv('summary/bf16-sorted-eng-focus.csv', index=False)\n", "\n", "# RWKV perf tracking\n", "rwkv_eng_focus_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=eng_focus_tGrps, inResults=eng_focus_tTest, exModels=[], inModels=FOCUS_MODEL_LIST, sort=True, simplified=True )\n", "rwkv_eng_focus_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-eng-focus.csv', index=False)\n", "\n", "# RWKV perf tracking\n", "rwkv_eng_all_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], exModels=[], inModels=FOCUS_MODEL_LIST, sort=True, simplified=True )\n", "rwkv_eng_all_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-eng-all.csv', index=False)\n", "\n", "# # Overall results\n", "# rwkv_all_results = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=[\"*\"], inResults=[\"*\"], inModels=[\"./rwkv-x-dev/*\", \"rwkv-x-dev/*\", \"RWKV/*\"], exModels=[], sort=True )\n", "# rwkv_all_results.to_csv('summary/rwkv-x-dev-bf16-all-results-and-groups.csv', index=False)\n", "\n", "# List the files\n", "!ls -lh summary" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "# 21 eval focus\n", "focus_21=[\"sciq\", \"glue\", \"anli\", \"mnli\", \"mnli_mismatch\", \"swag\", \"winogrande\", \"wnli\", \"truthfulqa\", \"logiqa\", \"logiqa2\", \"lambada_standard\", \"lambada_openai\", \"mmlu\", \"piqa\", \"arc_easy\", \"arc_challenge\", \"hellaswag\", \"openbookqa\", \"mathqa\", \"arithmetic\"]\n", "focus_21_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=focus_21, inResults=focus_21, exModels=[], inModels=[\"./rwkv-x-dev/*\", \"rwkv-x-dev/*\", \"RWKV/*\", \"meta-llama/Llama-2-7b*\", \"mistralai/Mistral-7B-v0.1\"], sort=True, simplified=True )\n", 
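"\n",
"# Optional sanity check (a sketch): print the shape of the focus table so an empty wildcard\n",
"# match in the inModels/inGroups filters is obvious before the CSV is written.\n",
"print(f'focus_21 table: {len(focus_21_sorted)} rows x {len(focus_21_sorted.columns)} columns')\n",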
"focus_21_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-eng-21-focus.csv', index=False)\n", "\n", "# English 180\n", "eng_180=[\n", " \"anli\",\n", " \"glue\",\n", " \"blimp\",\n", " \"truthfulqa\",\n", " \"lambada\",\n", " \"ai2_arc\",\n", " \"multimedqa\",\n", " \"pythia\",\n", " \"mathqa\",\n", " \"mmlu\",\n", " \"anli_r1\",\n", " \"anli_r2\",\n", " \"anli_r3\",\n", " \"wsc\",\n", " \"lambada_standard_cloze_yaml\",\n", " \"mnli\",\n", " \"mnli_mismatch\",\n", " \"mrpc\",\n", " \"qnli\",\n", " \"qqp\",\n", " \"rte\",\n", " \"sst2\",\n", " \"wnli\",\n", " \"blimp_adjunct_island\",\n", " \"blimp_anaphor_gender_agreement\",\n", " \"blimp_anaphor_number_agreement\",\n", " \"blimp_animate_subject_passive\",\n", " \"blimp_animate_subject_trans\",\n", " \"blimp_causative\",\n", " \"blimp_complex_NP_island\",\n", " \"blimp_coordinate_structure_constraint_complex_left_branch\",\n", " \"blimp_coordinate_structure_constraint_object_extraction\",\n", " \"blimp_determiner_noun_agreement_1\",\n", " \"blimp_determiner_noun_agreement_2\",\n", " \"blimp_determiner_noun_agreement_irregular_1\",\n", " \"blimp_determiner_noun_agreement_irregular_2\",\n", " \"blimp_determiner_noun_agreement_with_adj_2\",\n", " \"blimp_determiner_noun_agreement_with_adj_irregular_1\",\n", " \"blimp_determiner_noun_agreement_with_adj_irregular_2\",\n", " \"blimp_determiner_noun_agreement_with_adjective_1\",\n", " \"blimp_distractor_agreement_relational_noun\",\n", " \"blimp_distractor_agreement_relative_clause\",\n", " \"blimp_drop_argument\",\n", " \"blimp_ellipsis_n_bar_1\",\n", " \"blimp_ellipsis_n_bar_2\",\n", " \"blimp_existential_there_object_raising\",\n", " \"blimp_existential_there_quantifiers_1\",\n", " \"blimp_existential_there_quantifiers_2\",\n", " \"blimp_existential_there_subject_raising\",\n", " \"blimp_expletive_it_object_raising\",\n", " \"blimp_inchoative\",\n", " \"blimp_intransitive\",\n", " \"blimp_irregular_past_participle_adjectives\",\n", " \"blimp_irregular_past_participle_verbs\",\n", " \"blimp_irregular_plural_subject_verb_agreement_1\",\n", " \"blimp_irregular_plural_subject_verb_agreement_2\",\n", " \"blimp_left_branch_island_echo_question\",\n", " \"blimp_left_branch_island_simple_question\",\n", " \"blimp_matrix_question_npi_licensor_present\",\n", " \"blimp_npi_present_1\",\n", " \"blimp_npi_present_2\",\n", " \"blimp_only_npi_licensor_present\",\n", " \"blimp_only_npi_scope\",\n", " \"blimp_passive_1\",\n", " \"blimp_passive_2\",\n", " \"blimp_principle_A_c_command\",\n", " \"blimp_principle_A_case_1\",\n", " \"blimp_principle_A_case_2\",\n", " \"blimp_principle_A_domain_1\",\n", " \"blimp_principle_A_domain_2\",\n", " \"blimp_principle_A_domain_3\",\n", " \"blimp_principle_A_reconstruction\",\n", " \"blimp_regular_plural_subject_verb_agreement_1\",\n", " \"blimp_regular_plural_subject_verb_agreement_2\",\n", " \"blimp_sentential_negation_npi_licensor_present\",\n", " \"blimp_sentential_negation_npi_scope\",\n", " \"blimp_sentential_subject_island\",\n", " \"blimp_superlative_quantifiers_1\",\n", " \"blimp_superlative_quantifiers_2\",\n", " \"blimp_tough_vs_raising_1\",\n", " \"blimp_tough_vs_raising_2\",\n", " \"blimp_transitive\",\n", " \"blimp_wh_island\",\n", " \"blimp_wh_questions_object_gap\",\n", " \"blimp_wh_questions_subject_gap\",\n", " \"blimp_wh_questions_subject_gap_long_distance\",\n", " \"blimp_wh_vs_that_no_gap\",\n", " \"blimp_wh_vs_that_no_gap_long_distance\",\n", " \"blimp_wh_vs_that_with_gap\",\n", " \"blimp_wh_vs_that_with_gap_long_distance\",\n", " \"sciq\",\n", " 
\"truthfulqa_mc1\",\n", " \"truthfulqa_mc2\",\n", " \"multirc\",\n", " \"lambada_openai\",\n", " \"lambada_standard\",\n", " \"piqa\",\n", " \"prost\",\n", " \"wsc273\",\n", " \"qa4mre_2011\",\n", " \"qa4mre_2012\",\n", " \"qa4mre_2013\",\n", " \"arc_challenge\",\n", " \"arc_easy\",\n", " \"logiqa\",\n", " \"winogrande\",\n", " \"boolq\",\n", " \"logiqa2\",\n", " \"openbookqa\",\n", " \"medmcqa\",\n", " \"medqa_4options\",\n", " \"mmlu_anatomy\",\n", " \"mmlu_clinical_knowledge\",\n", " \"mmlu_college_biology\",\n", " \"mmlu_college_medicine\",\n", " \"mmlu_medical_genetics\",\n", " \"mmlu_professional_medicine\",\n", " \"pubmedqa\",\n", " \"mc_taco\",\n", " \"lambada_openai_mt_de\",\n", " \"lambada_openai_mt_en\",\n", " \"lambada_openai_mt_es\",\n", " \"lambada_openai_mt_fr\",\n", " \"lambada_openai_mt_it\",\n", " \"mmlu_formal_logic\",\n", " \"mmlu_high_school_european_history\",\n", " \"mmlu_high_school_us_history\",\n", " \"mmlu_high_school_world_history\",\n", " \"mmlu_international_law\",\n", " \"mmlu_jurisprudence\",\n", " \"mmlu_logical_fallacies\",\n", " \"mmlu_moral_disputes\",\n", " \"mmlu_moral_scenarios\",\n", " \"mmlu_philosophy\",\n", " \"mmlu_prehistory\",\n", " \"mmlu_professional_law\",\n", " \"mmlu_world_religions\",\n", " \"mmlu_business_ethics\",\n", " \"mmlu_global_facts\",\n", " \"mmlu_human_aging\",\n", " \"mmlu_management\",\n", " \"mmlu_marketing\",\n", " \"mmlu_miscellaneous\",\n", " \"mmlu_nutrition\",\n", " \"mmlu_professional_accounting\",\n", " \"mmlu_virology\",\n", " \"mmlu_econometrics\",\n", " \"mmlu_high_school_geography\",\n", " \"mmlu_high_school_government_and_politics\",\n", " \"mmlu_high_school_macroeconomics\",\n", " \"mmlu_high_school_microeconomics\",\n", " \"mmlu_high_school_psychology\",\n", " \"mmlu_human_sexuality\",\n", " \"mmlu_professional_psychology\",\n", " \"mmlu_public_relations\",\n", " \"mmlu_security_studies\",\n", " \"mmlu_sociology\",\n", " \"mmlu_us_foreign_policy\",\n", " \"mmlu_abstract_algebra\",\n", " \"mmlu_astronomy\",\n", " \"mmlu_college_chemistry\",\n", " \"mmlu_college_computer_science\",\n", " \"mmlu_college_mathematics\",\n", " \"mmlu_college_physics\",\n", " \"mmlu_computer_security\",\n", " \"mmlu_conceptual_physics\",\n", " \"mmlu_electrical_engineering\",\n", " \"mmlu_elementary_mathematics\",\n", " \"mmlu_high_school_biology\",\n", " \"mmlu_high_school_chemistry\",\n", " \"mmlu_high_school_computer_science\",\n", " \"mmlu_high_school_mathematics\",\n", " \"mmlu_high_school_physics\",\n", " \"mmlu_high_school_statistics\",\n", " \"mmlu_machine_learning\",\n", " \"wic\",\n", " \"swag\",\n", " \"hellaswag\",\n", " \"cb\",\n", " \"sglue_rte\"\n", "]\n", "eng_180_sorted = generate_result_table( inConfig = { \"dtype\": \"bfloat16\" }, inGroups=eng_180, inResults=eng_180, exModels=[], inModels=[\"./rwkv-x-dev/*\", \"rwkv-x-dev/*\", \"RWKV/*\", \"meta-llama/Llama-2-7b*\", \"mistralai/Mistral-7B-v0.1\"], sort=True, simplified=True )\n", "eng_180_sorted.to_csv('summary/rwkv-x-dev-bf16-sorted-eng-180.csv', index=False)\n", "\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" } }, "nbformat": 4, "nbformat_minor": 2 }