{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Vp5YVvaTpIiX", "outputId": "3c5b2a63-1fb4-430d-a986-4092ee8d4891" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "--2023-11-28 19:52:23-- https://huggingface.co/spaces/lmsys/mt-bench/resolve/main/data/mt_bench/model_judgment/gpt-4_single.jsonl\n", "Resolving huggingface.co (huggingface.co)... 18.164.174.55, 18.164.174.23, 18.164.174.118, ...\n", "Connecting to huggingface.co (huggingface.co)|18.164.174.55|:443... connected.\n", "HTTP request sent, awaiting response... 302 Found\n", "Location: https://cdn-lfs.huggingface.co/repos/12/2b/122bd8e9eccbb3acc98acf73e0ecef3c96f24dcdb5f6639074ed304eb19f9cd4/76c55033c6b2b1cc3f62513458f84748a23352495fd42b1062a7401de5ff9bd9?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27gpt-4_single.jsonl%3B+filename%3D%22gpt-4_single.jsonl%22%3B&Expires=1701460343&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcwMTQ2MDM0M319LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8xMi8yYi8xMjJiZDhlOWVjY2JiM2FjYzk4YWNmNzNlMGVjZWYzYzk2ZjI0ZGNkYjVmNjYzOTA3NGVkMzA0ZWIxOWY5Y2Q0Lzc2YzU1MDMzYzZiMmIxY2MzZjYyNTEzNDU4Zjg0NzQ4YTIzMzUyNDk1ZmQ0MmIxMDYyYTc0MDFkZTVmZjliZDk%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=hwTqBVlLz755xHaQaN6cSDP2FxoPBAXFcOE2uvFAYzg0Y90kGkY3A74Fj2wAkToA-dN1WJeMc%7Ef2XarD%7EbAw%7E4v2JCw9kphUxL-pcRF1uNBI2pzS-3Joff-m%7Ee3GVq5%7E8QabDfK60nWuA10CodvlaRDqVpuYEAvF2n5tY3Adf6-V-YdcaxE2DTlHXm65oJsJwWJTGiQYzTtn4rEVWKgQHVYp7CqX0IdyaILr966agOZvdUGDUZfkZtG6E9A6zKOgOBfdpJn1tjmMKEkDscDvLJvg8r9QJY7yttPHOMNVruzVtoLjpg1lFb-tXco3h%7EFZVKiOIZL%7E597WbaDu8hdZOQ__&Key-Pair-Id=KVTP0A1DKRTAX [following]\n", "--2023-11-28 19:52:23-- https://cdn-lfs.huggingface.co/repos/12/2b/122bd8e9eccbb3acc98acf73e0ecef3c96f24dcdb5f6639074ed304eb19f9cd4/76c55033c6b2b1cc3f62513458f84748a23352495fd42b1062a7401de5ff9bd9?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27gpt-4_single.jsonl%3B+filename%3D%22gpt-4_single.jsonl%22%3B&Expires=1701460343&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcwMTQ2MDM0M319LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8xMi8yYi8xMjJiZDhlOWVjY2JiM2FjYzk4YWNmNzNlMGVjZWYzYzk2ZjI0ZGNkYjVmNjYzOTA3NGVkMzA0ZWIxOWY5Y2Q0Lzc2YzU1MDMzYzZiMmIxY2MzZjYyNTEzNDU4Zjg0NzQ4YTIzMzUyNDk1ZmQ0MmIxMDYyYTc0MDFkZTVmZjliZDk%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=hwTqBVlLz755xHaQaN6cSDP2FxoPBAXFcOE2uvFAYzg0Y90kGkY3A74Fj2wAkToA-dN1WJeMc%7Ef2XarD%7EbAw%7E4v2JCw9kphUxL-pcRF1uNBI2pzS-3Joff-m%7Ee3GVq5%7E8QabDfK60nWuA10CodvlaRDqVpuYEAvF2n5tY3Adf6-V-YdcaxE2DTlHXm65oJsJwWJTGiQYzTtn4rEVWKgQHVYp7CqX0IdyaILr966agOZvdUGDUZfkZtG6E9A6zKOgOBfdpJn1tjmMKEkDscDvLJvg8r9QJY7yttPHOMNVruzVtoLjpg1lFb-tXco3h%7EFZVKiOIZL%7E597WbaDu8hdZOQ__&Key-Pair-Id=KVTP0A1DKRTAX\n", "Resolving cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)... 18.65.25.40, 18.65.25.122, 18.65.25.124, ...\n", "Connecting to cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)|18.65.25.40|:443... connected.\n", "HTTP request sent, awaiting response... 
200 OK\n", "Length: 20113128 (19M) [text/plain]\n", "Saving to: ‘gpt-4_single.jsonl’\n", "\n", "gpt-4_single.jsonl 100%[===================>] 19.18M 25.8MB/s in 0.7s \n", "\n", "2023-11-28 19:52:25 (25.8 MB/s) - ‘gpt-4_single.jsonl’ saved [20113128/20113128]\n", "\n", "--2023-11-28 19:52:25-- https://huggingface.co/spaces/lmsys/mt-bench/resolve/main/data/mt_bench/model_judgment/gpt-4_pair.jsonl\n", "Resolving huggingface.co (huggingface.co)... 18.164.174.55, 18.164.174.23, 18.164.174.118, ...\n", "Connecting to huggingface.co (huggingface.co)|18.164.174.55|:443... connected.\n", "HTTP request sent, awaiting response... 302 Found\n", "Location: https://cdn-lfs.huggingface.co/repos/12/2b/122bd8e9eccbb3acc98acf73e0ecef3c96f24dcdb5f6639074ed304eb19f9cd4/d662c0b7d1d297f0494fcb4cc09fe8f054fa22d75deb4754a483a921984bc585?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27gpt-4_pair.jsonl%3B+filename%3D%22gpt-4_pair.jsonl%22%3B&Expires=1701460345&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcwMTQ2MDM0NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8xMi8yYi8xMjJiZDhlOWVjY2JiM2FjYzk4YWNmNzNlMGVjZWYzYzk2ZjI0ZGNkYjVmNjYzOTA3NGVkMzA0ZWIxOWY5Y2Q0L2Q2NjJjMGI3ZDFkMjk3ZjA0OTRmY2I0Y2MwOWZlOGYwNTRmYTIyZDc1ZGViNDc1NGE0ODNhOTIxOTg0YmM1ODU%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=RcHQsWboSyCegZB6o-k6-9fsGpTmhArmdubGyrc7VTT2cc9FKMoPc4vHW0RtMgS%7EkYWm2eA9sfex%7EWN%7E5A0i1CBBWP3EDq365Jt52BdOw4BbOtezicyT2eLPzNkgrw3RuLMZTApHUr6md1TVm0W15rmSaUpoQT5sKcVwq%7EvmmLXr6AFOV6vWho6vEHSadzT8GJkK%7El9xOtBGhCE-pWOsEU6siX9sw0HwZBmg1mcXJzMj2du%7Em5AmG3lXsJm2fFY0ZmhSZjm7FH%7EBxF38wTuuf3gBUeJUU%7Ecx0Lv935FSAmmdzqrXO4CiGq%7EQSTp7uga8mUJikosX6DlfLMZudAIVzg__&Key-Pair-Id=KVTP0A1DKRTAX [following]\n", "--2023-11-28 19:52:25-- https://cdn-lfs.huggingface.co/repos/12/2b/122bd8e9eccbb3acc98acf73e0ecef3c96f24dcdb5f6639074ed304eb19f9cd4/d662c0b7d1d297f0494fcb4cc09fe8f054fa22d75deb4754a483a921984bc585?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27gpt-4_pair.jsonl%3B+filename%3D%22gpt-4_pair.jsonl%22%3B&Expires=1701460345&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcwMTQ2MDM0NX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy8xMi8yYi8xMjJiZDhlOWVjY2JiM2FjYzk4YWNmNzNlMGVjZWYzYzk2ZjI0ZGNkYjVmNjYzOTA3NGVkMzA0ZWIxOWY5Y2Q0L2Q2NjJjMGI3ZDFkMjk3ZjA0OTRmY2I0Y2MwOWZlOGYwNTRmYTIyZDc1ZGViNDc1NGE0ODNhOTIxOTg0YmM1ODU%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=RcHQsWboSyCegZB6o-k6-9fsGpTmhArmdubGyrc7VTT2cc9FKMoPc4vHW0RtMgS%7EkYWm2eA9sfex%7EWN%7E5A0i1CBBWP3EDq365Jt52BdOw4BbOtezicyT2eLPzNkgrw3RuLMZTApHUr6md1TVm0W15rmSaUpoQT5sKcVwq%7EvmmLXr6AFOV6vWho6vEHSadzT8GJkK%7El9xOtBGhCE-pWOsEU6siX9sw0HwZBmg1mcXJzMj2du%7Em5AmG3lXsJm2fFY0ZmhSZjm7FH%7EBxF38wTuuf3gBUeJUU%7Ecx0Lv935FSAmmdzqrXO4CiGq%7EQSTp7uga8mUJikosX6DlfLMZudAIVzg__&Key-Pair-Id=KVTP0A1DKRTAX\n", "Resolving cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)... 18.65.25.40, 18.65.25.122, 18.65.25.124, ...\n", "Connecting to cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)|18.65.25.40|:443... connected.\n", "HTTP request sent, awaiting response... 
200 OK\n", "Length: 48043462 (46M) [binary/octet-stream]\n", "Saving to: ‘gpt-4_pair.jsonl’\n", "\n", "gpt-4_pair.jsonl 100%[===================>] 45.82M 36.0MB/s in 1.3s \n", "\n", "2023-11-28 19:52:27 (36.0 MB/s) - ‘gpt-4_pair.jsonl’ saved [48043462/48043462]\n", "\n" ] } ], "source": [ "!wget https://huggingface.co/spaces/lmsys/mt-bench/resolve/main/data/mt_bench/model_judgment/gpt-4_single.jsonl\n", "!wget https://huggingface.co/spaces/lmsys/mt-bench/resolve/main/data/mt_bench/model_judgment/gpt-4_pair.jsonl" ] }, { "cell_type": "code", "source": [ "!pip install -U plotly kaleido" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "4eYlKr9RrPu2", "outputId": "b957d1f9-0024-4c5c-eb07-dcb1a0071081" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Requirement already satisfied: plotly in /usr/local/lib/python3.10/dist-packages (5.15.0)\n", "Collecting plotly\n", " Downloading plotly-5.18.0-py3-none-any.whl (15.6 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.6/15.6 MB\u001b[0m \u001b[31m27.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hCollecting kaleido\n", " Downloading kaleido-0.2.1-py2.py3-none-manylinux1_x86_64.whl (79.9 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m79.9/79.9 MB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: tenacity>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from plotly) (8.2.3)\n", "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from plotly) (23.2)\n", "Installing collected packages: kaleido, plotly\n", " Attempting uninstall: plotly\n", " Found existing installation: plotly 5.15.0\n", " Uninstalling plotly-5.15.0:\n", " Successfully uninstalled plotly-5.15.0\n", "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", "lida 0.0.10 requires fastapi, which is not installed.\n", "lida 0.0.10 requires python-multipart, which is not installed.\n", "lida 0.0.10 requires uvicorn, which is not installed.\u001b[0m\u001b[31m\n", "\u001b[0mSuccessfully installed kaleido-0.2.1 plotly-5.18.0\n" ] } ] }, { "cell_type": "code", "source": [ "import json\n", "import pandas as pd\n", "import plotly.express as px\n", "import plotly.graph_objects as go\n", "\n", "\n", "CATEGORIES = [\"Writing\", \"Roleplay\", \"Reasoning\", \"Math\", \"Coding\", \"Extraction\", \"STEM\", \"Humanities\"]\n", "\n", "\n", "def get_model_df():\n", " cnt = 0\n", " q2result = []\n", " fin = open(\"gpt-4_single.jsonl\", \"r\")\n", " for line in fin:\n", " obj = json.loads(line)\n", " obj[\"category\"] = CATEGORIES[(obj[\"question_id\"]-81)//10]\n", " q2result.append(obj)\n", " df = pd.DataFrame(q2result)\n", " return df\n", "\n", "def toggle(res_str):\n", " if res_str == \"win\":\n", " return \"loss\"\n", " elif res_str == \"loss\":\n", " return \"win\"\n", " return \"tie\"\n", "\n", "def get_model_df_pair():\n", " fin = open(\"gpt-4_pair.jsonl\", \"r\")\n", " cnt = 0\n", " q2result = []\n", " for line in fin:\n", " obj = json.loads(line)\n", "\n", " result = {}\n", " result[\"qid\"] = str(obj[\"question_id\"])\n", " result[\"turn\"] = str(obj[\"turn\"])\n", " if obj[\"g1_winner\"] == \"model_1\" and obj[\"g2_winner\"] == \"model_1\":\n", " result[\"result\"] = \"win\"\n", " elif obj[\"g1_winner\"] == \"model_2\" and obj[\"g2_winner\"] == \"model_2\":\n", " result[\"result\"] = \"loss\"\n", " else:\n", " result[\"result\"] = \"tie\"\n", " result[\"category\"] = CATEGORIES[(obj[\"question_id\"]-81)//10]\n", " result[\"model\"] = obj[\"model_1\"]\n", " q2result.append(result)\n", "\n", " df = pd.DataFrame(q2result)\n", "\n", " return df\n", "\n", "df = get_model_df()\n", "df_pair = get_model_df_pair()" ], "metadata": { "id": "m2tG_vDyqWZw" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "df_pair" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 423 }, "id": "wUw1sxfmaGuK", "outputId": "21365f64-c2fa-47c7-9ad4-ca114eac6533" }, "execution_count": null, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ " qid turn result category model\n", "0 81 1 loss Writing alpaca-13b\n", "1 81 2 loss Writing alpaca-13b\n", "2 82 1 loss Writing alpaca-13b\n", "3 82 2 loss Writing alpaca-13b\n", "4 83 1 loss Writing alpaca-13b\n", "... ... ... ... ... ...\n", "4795 158 2 tie Humanities wizardlm-30b\n", "4796 159 1 loss Humanities wizardlm-30b\n", "4797 159 2 win Humanities wizardlm-30b\n", "4798 160 1 loss Humanities wizardlm-30b\n", "4799 160 2 tie Humanities wizardlm-30b\n", "\n", "[4800 rows x 5 columns]" ], "text/html": [ "\n", "
<div>df_pair DataFrame: 4800 rows x 5 columns (qid, turn, result, category, model); interactive HTML table output omitted, see the text/plain rendering above.</div>
\n" ] }, "metadata": {}, "execution_count": 4 } ] }, { "cell_type": "code", "source": [ "all_models = df[\"model\"].unique()\n", "print(all_models)\n", "scores_all = []\n", "for model in all_models:\n", " for cat in CATEGORIES:\n", " # filter category/model, and score format error (<1% case)\n", " res = df[(df[\"category\"]==cat) & (df[\"model\"]==model) & (df[\"score\"] >= 0)]\n", " score = res[\"score\"].mean()\n", "\n", " # # pairwise result\n", " # res_pair = df_pair[(df_pair[\"category\"]==cat) & (df_pair[\"model\"]==model)][\"result\"].value_counts()\n", " # wincnt = res_pair[\"win\"] if \"win\" in res_pair.index else 0\n", " # tiecnt = res_pair[\"tie\"] if \"tie\" in res_pair.index else 0\n", " # winrate = wincnt/res_pair.sum()\n", " # winrate_adjusted = (wincnt + tiecnt)/res_pair.sum()\n", " # # print(winrate_adjusted)\n", "\n", " # scores_all.append({\"model\": model, \"category\": cat, \"score\": score, \"winrate\": winrate, \"wtrate\": winrate_adjusted})\n", " scores_all.append({\"model\": model, \"category\": cat, \"score\": score})" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "MpBKLuVmqZ7O", "outputId": "f7ea476f-dde8-4b7c-fb69-5d7d33999caf" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "['alpaca-13b' 'baize-v2-13b' 'chatglm-6b' 'claude-instant-v1' 'claude-v1'\n", " 'dolly-v2-12b' 'falcon-40b-instruct' 'fastchat-t5-3b' 'gpt-3.5-turbo'\n", " 'gpt-4' 'gpt4all-13b-snoozy' 'guanaco-33b' 'guanaco-65b'\n", " 'h2ogpt-oasst-open-llama-13b' 'koala-13b' 'llama-13b' 'mpt-30b-chat'\n", " 'mpt-30b-instruct' 'mpt-7b-chat' 'nous-hermes-13b'\n", " 'oasst-sft-4-pythia-12b' 'oasst-sft-7-llama-30b' 'palm-2-chat-bison-001'\n", " 'rwkv-4-raven-14b' 'stablelm-tuned-alpha-7b' 'tulu-30b' 'vicuna-13b-v1.3'\n", " 'vicuna-33b-v1.3' 'vicuna-7b-v1.3' 'wizardlm-13b' 'wizardlm-30b'\n", " 'Llama-2-7b-chat' 'Llama-2-13b-chat' 'Llama-2-70b-chat']\n" ] } ] }, { "cell_type": "code", "source": [ "target_models = [\"Llama-2-7b-chat\", \"Llama-2-13b-chat\", \"Llama-2-70b-chat\", \"gpt-3.5-turbo\", \"claude-v1\", \"gpt-4\"]\n", "\n", "scores_target = [scores_all[i] for i in range(len(scores_all)) if scores_all[i][\"model\"] in target_models]\n", "\n", "# sort by target_models\n", "scores_target = sorted(scores_target, key=lambda x: target_models.index(x[\"model\"]), reverse=True)\n", "\n", "df_score = pd.DataFrame(scores_target)\n", "df_score = df_score[df_score[\"model\"].isin(target_models)]\n", "\n", "rename_map = {\"llama-13b\": \"LLaMA-13B\",\n", " \"alpaca-13b\": \"Alpaca-13B\",\n", " \"vicuna-33b-v1.3\": \"Vicuna-33B\",\n", " \"vicuna-13b-v1.3\": \"Vicuna-13B\",\n", " \"gpt-3.5-turbo\": \"GPT-3.5-turbo\",\n", " \"claude-v1\": \"Claude-v1\",\n", " \"gpt-4\": \"GPT-4\"}\n", "\n", "for k, v in rename_map.items():\n", " df_score.replace(k, v, inplace=True)\n", "\n", "fig = px.line_polar(df_score, r = 'score', theta = 'category', line_close = True, category_orders = {\"category\": CATEGORIES},\n", " color = 'model', markers=True, color_discrete_sequence=px.colors.qualitative.Pastel)\n", "\n", "fig.show()" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 542 }, "id": "5i8R0l-XqkgO", "outputId": "10151ab6-cf3d-4162-a0cf-c510f2e3968a" }, "execution_count": null, "outputs": [ { "output_type": "display_data", "data": { "text/html": [ "\n", "\n", "\n", "
<div>Plotly polar (radar) chart of per-category MT-bench scores for the selected models; interactive figure HTML omitted.</div>\n", "
\n", "\n", "" ] }, "metadata": {} } ] }, { "cell_type": "code", "source": [ "# fig = px.line_polar(df_score, r = 'wtrate', theta = 'category', line_close = True, category_orders = {\"category\": CATEGORIES},\n", "# color = 'model', markers=True, color_discrete_sequence=px.colors.qualitative.Pastel)\n", "# fig.show()" ], "metadata": { "id": "MaBaUN4IqvJI" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "fig.update_layout(\n", " font=dict(\n", " size=18,\n", " ),\n", ")\n", "fig.write_image(\"fig.png\", width=800, height=600, scale=2)" ], "metadata": { "id": "4l1bzYM2bgDW" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [], "metadata": { "id": "nfpERnxFANhV" }, "execution_count": null, "outputs": [] } ] }