alozowski committed
Commit 75ec077
Parent: 7dd58ca

Resubmit failed models

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. 01-ai/Yi-Coder-9B-Chat_eval_request_False_bfloat16_Original.json +1 -1
  2. 01-ai/Yi-Coder-9B_eval_request_False_bfloat16_Original.json +1 -1
  3. ArliAI/ArliAI-RPMax-12B-v1.1_eval_request_False_bfloat16_Original.json +1 -1
  4. BEE-spoke-data/tFINE-900m-e16-d32-flan_eval_request_False_bfloat16_Original.json +1 -1
  5. BeaverAI/mistral-doryV2-12b_eval_request_False_bfloat16_Original.json +1 -1
  6. ClaudioItaly/Albacus_eval_request_False_bfloat16_Original.json +1 -1
  7. DavidAU/L3-Dark-Planet-8B_eval_request_False_bfloat16_Original.json +1 -1
  8. DavidAU/L3-Lumimaid-12.2B-v0.1-OAS-Instruct_eval_request_False_float16_Original.json +1 -1
  9. DavidAU/L3-SMB-Instruct-12.2B-F32_eval_request_False_float16_Original.json +1 -1
  10. DavidAU/L3-Stheno-v3.2-12.2B-Instruct_eval_request_False_bfloat16_Original.json +1 -1
  11. Dongwookss/small_fut_final_eval_request_False_float16_Original.json +1 -1
  12. DreadPoor/Heart_Stolen-ALT-8B-Model_Stock_eval_request_False_bfloat16_Original.json +1 -1
  13. EpistemeAI/Alpaca-Llama3.1-8B_eval_request_False_float16_Original.json +1 -1
  14. EpistemeAI2/Fireball-Alpaca-Llama3.1.04-8B-Philos_eval_request_False_float16_Original.json +1 -1
  15. EpistemeAI2/Fireball-Alpaca-Llama3.1.06-8B-Philos-dpo_eval_request_False_float16_Original.json +1 -1
  16. EpistemeAI2/Fireball-Alpaca-Llama3.1.07-8B-Philos-Math_eval_request_False_float16_Original.json +1 -1
  17. FuJhen/ft-openhermes-25-mistral-7b-irca-dpo-pairs_eval_request_False_bfloat16_Adapter.json +1 -1
  18. FuJhen/mistral-instruct-7B-DPO_eval_request_False_bfloat16_Adapter.json +1 -1
  19. GalrionSoftworks/MN-LooseCannon-12B-v1_eval_request_False_bfloat16_Original.json +1 -1
  20. Josephgflowers/TinyLlama_v1.1_math_code-world-test-1_eval_request_False_float16_Original.json +1 -1
  21. KSU-HW-SEC/Llama3-70b-SVA-FT-1000_eval_request_False_bfloat16_Original.json +1 -1
  22. KSU-HW-SEC/Llama3-70b-SVA-FT-1415_eval_request_False_bfloat16_Original.json +1 -1
  23. KSU-HW-SEC/Llama3-70b-SVA-FT-500_eval_request_False_bfloat16_Original.json +1 -1
  24. KSU-HW-SEC/Llama3-70b-SVA-FT-final_eval_request_False_bfloat16_Original.json +1 -1
  25. KSU-HW-SEC/Llama3.1-70b-SVA-FT-1000step_eval_request_False_bfloat16_Original.json +1 -1
  26. KSU-HW-SEC/Llama3.1-70b-SVA-FT-1415step_eval_request_False_bfloat16_Original.json +1 -1
  27. KSU-HW-SEC/Llama3.1-70b-SVA-FT-500step_eval_request_False_bfloat16_Original.json +1 -1
  28. LeroyDyer/_Spydaz_Web_AI_ChatQA_001_SFT_eval_request_False_bfloat16_Original.json +1 -1
  29. LeroyDyer/_Spydaz_Web_AI_ChatQA_001_UFT_eval_request_False_bfloat16_Original.json +1 -1
  30. MaziyarPanahi/calme-2.1-llama3.1-70b_eval_request_False_bfloat16_Original.json +1 -1
  31. Mxode/NanoLM-0.3B-Instruct-v1.1_eval_request_False_bfloat16_Original.json +1 -1
  32. Mxode/NanoLM-0.3B-Instruct-v1_eval_request_False_bfloat16_Original.json +1 -1
  33. Mxode/NanoLM-0.3B-Instruct-v2_eval_request_False_bfloat16_Original.json +1 -1
  34. Mxode/NanoLM-1B-Instruct-v1.1_eval_request_False_bfloat16_Original.json +1 -1
  35. Mxode/NanoLM-1B-Instruct-v2_eval_request_False_bfloat16_Original.json +1 -1
  36. OpenBuddy/openbuddy-llama3.1-70b-v22.1-131k_eval_request_False_bfloat16_Original.json +1 -1
  37. Rakuten/RakutenAI-7B_eval_request_False_float16_Original.json +1 -1
  38. Trappu/Magnum-Picaro-0.7-v2-12b_eval_request_False_bfloat16_Original.json +1 -1
  39. VIRNECT/llama-3-Korean-8B-r-v-0.1_eval_request_False_float16_Adapter.json +1 -1
  40. Weyaxi/Bagel-Hermes-34B-Slerp_eval_request_False_bfloat16_Original.json +1 -1
  41. aaditya/Llama3-OpenBioLLM-70B_eval_request_False_bfloat16_Original.json +1 -1
  42. abdullahriaz/llama3.1-med-bot_eval_request_False_4bit_Adapter.json +1 -1
  43. abhishek/autotrain-vr4a1-e5mms_eval_request_False_float16_Adapter.json +1 -1
  44. acbdkk/SupaMATH2_eval_request_False_4bit_Adapter.json +1 -1
  45. allknowingroger/Chocolatine-24B_eval_request_False_float16_Original.json +1 -1
  46. anthracite-org/magnum-v2-12b_eval_request_False_bfloat16_Original.json +1 -1
  47. anthracite-org/magnum-v3-34b_eval_request_False_bfloat16_Original.json +1 -1
  48. anthracite-org/magnum-v3-34b_eval_request_False_float16_Original.json +1 -1
  49. carsenk/flippa-v6_eval_request_False_float16_Adapter.json +1 -1
  50. djuna/L3.1-Suze-Vume-calc_eval_request_False_bfloat16_Original.json +1 -1
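
Every request file below receives the same one-line edit: "status" is flipped from "FAILED" back to "PENDING" so the evaluation queue picks the model up again. A minimal sketch of how such a batch resubmission could be scripted follows; the local requests/ directory, the indent=4 serialization, and the helper name are assumptions for illustration, not part of this commit.

```python
# Sketch of the batch resubmission behind this commit: rewrite every eval
# request whose "status" is "FAILED" back to "PENDING".
# Assumptions: a local checkout of the request files under ./requests, and
# JSON written with 4-space indentation and \uXXXX escapes (ensure_ascii=True),
# matching how the stored files appear in the diffs below.
import json
from pathlib import Path

REQUESTS_DIR = Path("requests")  # hypothetical path to the checked-out request files


def resubmit_failed(requests_dir: Path = REQUESTS_DIR) -> int:
    """Flip FAILED -> PENDING in every *_eval_request_*.json file; return the count."""
    resubmitted = 0
    for path in sorted(requests_dir.rglob("*_eval_request_*.json")):
        data = json.loads(path.read_text(encoding="utf-8"))
        if data.get("status") == "FAILED":
            data["status"] = "PENDING"
            path.write_text(json.dumps(data, indent=4, ensure_ascii=True) + "\n",
                            encoding="utf-8")
            resubmitted += 1
    return resubmitted


if __name__ == "__main__":
    print(f"Resubmitted {resubmit_failed()} eval requests")
```

Committing the rewritten files to the requests repository is what yields a set of one-line diffs like the ones shown here.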
01-ai/Yi-Coder-9B-Chat_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.829,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-12T14:41:05Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8748118",
01-ai/Yi-Coder-9B_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.829,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-12T14:43:55Z",
   "model_type": "\ud83d\udfe2 : \ud83d\udfe2 pretrained",
   "job_id": "8748132",
ArliAI/ArliAI-RPMax-12B-v1.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 12.248,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-05T06:23:17Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8653861",
BEE-spoke-data/tFINE-900m-e16-d32-flan_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 0.887,
   "architectures": "T5ForConditionalGeneration",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-13T00:33:27Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8752311",
BeaverAI/mistral-doryV2-12b_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 12.248,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-07T16:15:04Z",
   "model_type": "\ud83d\udcac : \ud83d\udcac chat models (RLHF, DPO, IFT, ...)",
   "job_id": "8685313",
ClaudioItaly/Albacus_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.987,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T21:14:56Z",
   "model_type": "\ud83e\udd1d : \ud83e\udd1d base merges and moerges",
   "job_id": "8693835",
DavidAU/L3-Dark-Planet-8B_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.03,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-12T01:26:02Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8746294",
DavidAU/L3-Lumimaid-12.2B-v0.1-OAS-Instruct_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 12.174,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-12T01:31:57Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8746309",
DavidAU/L3-SMB-Instruct-12.2B-F32_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 12.174,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-12T01:32:18Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8746307",
DavidAU/L3-Stheno-v3.2-12.2B-Instruct_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 12.174,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-12T01:31:26Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8746305",
Dongwookss/small_fut_final_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 7.242,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-07-01T00:26:33Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8095898",
DreadPoor/Heart_Stolen-ALT-8B-Model_Stock_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.03,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-11T14:47:15Z",
   "model_type": "\ud83e\udd1d : \ud83e\udd1d base merges and moerges",
   "job_id": "8740959",
EpistemeAI/Alpaca-Llama3.1-8B_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.0,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-08-13T17:57:46Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8747798",
EpistemeAI2/Fireball-Alpaca-Llama3.1.04-8B-Philos_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.0,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-05T21:21:48Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8673409",
EpistemeAI2/Fireball-Alpaca-Llama3.1.06-8B-Philos-dpo_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.0,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-09T23:33:25Z",
   "model_type": "\ud83d\udcac : \ud83d\udcac chat models (RLHF, DPO, IFT, ...)",
   "job_id": "8700941",
EpistemeAI2/Fireball-Alpaca-Llama3.1.07-8B-Philos-Math_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.0,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-10T20:49:35Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8721024",
FuJhen/ft-openhermes-25-mistral-7b-irca-dpo-pairs_eval_request_False_bfloat16_Adapter.json CHANGED
@@ -6,7 +6,7 @@
   "params": 14.483,
   "architectures": "?",
   "weight_type": "Adapter",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-12T14:45:02Z",
   "model_type": "\ud83d\udcac : \ud83d\udcac chat models (RLHF, DPO, IFT, ...)",
   "job_id": "8748185",
FuJhen/mistral-instruct-7B-DPO_eval_request_False_bfloat16_Adapter.json CHANGED
@@ -6,7 +6,7 @@
   "params": 14.496,
   "architectures": "?",
   "weight_type": "Adapter",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-12T16:06:10Z",
   "model_type": "\ud83d\udcac : \ud83d\udcac chat models (RLHF, DPO, IFT, ...)",
   "job_id": "8748183",
GalrionSoftworks/MN-LooseCannon-12B-v1_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 12.248,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-05T06:29:58Z",
   "model_type": "\ud83e\udd1d : \ud83e\udd1d base merges and moerges",
   "job_id": "8653874",
Josephgflowers/TinyLlama_v1.1_math_code-world-test-1_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 1.1,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-09T18:06:52Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8749952",
KSU-HW-SEC/Llama3-70b-SVA-FT-1000_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.554,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T21:19:11Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8693883",
KSU-HW-SEC/Llama3-70b-SVA-FT-1415_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.554,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T21:19:36Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8693888",
KSU-HW-SEC/Llama3-70b-SVA-FT-500_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.554,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T21:19:50Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8693890",
KSU-HW-SEC/Llama3-70b-SVA-FT-final_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.554,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T21:20:02Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8693895",
KSU-HW-SEC/Llama3.1-70b-SVA-FT-1000step_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.554,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T21:20:14Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8693899",
KSU-HW-SEC/Llama3.1-70b-SVA-FT-1415step_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.554,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T21:20:34Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8693971",
KSU-HW-SEC/Llama3.1-70b-SVA-FT-500step_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.554,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T21:20:46Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8693893",
LeroyDyer/_Spydaz_Web_AI_ChatQA_001_SFT_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 7.242,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-11T06:12:04Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8739242",
LeroyDyer/_Spydaz_Web_AI_ChatQA_001_UFT_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 7.242,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-11T06:11:17Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8739240",
MaziyarPanahi/calme-2.1-llama3.1-70b_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.554,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-07-24T12:05:05Z",
   "model_type": "\ud83d\udcac : \ud83d\udcac chat models (RLHF, DPO, IFT, ...)",
   "job_id": "8457691",
Mxode/NanoLM-0.3B-Instruct-v1.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 0.315,
   "architectures": "Qwen2ForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-05T13:49:40Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8749943",
Mxode/NanoLM-0.3B-Instruct-v1_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 0.315,
   "architectures": "Qwen2ForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-05T07:01:18Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8749945",
Mxode/NanoLM-0.3B-Instruct-v2_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 0.315,
   "architectures": "Qwen2ForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T11:30:40Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8749947",
Mxode/NanoLM-1B-Instruct-v1.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 1.076,
   "architectures": "Qwen2ForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-08T11:24:50Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8749950",
Mxode/NanoLM-1B-Instruct-v2_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 1.076,
   "architectures": "Qwen2ForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-09T04:38:17Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8748202",
OpenBuddy/openbuddy-llama3.1-70b-v22.1-131k_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.554,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-08-24T02:57:14Z",
   "model_type": "\ud83d\udcac : \ud83d\udcac chat models (RLHF, DPO, IFT, ...)",
   "job_id": "8504352",
Rakuten/RakutenAI-7B_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 7.373,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-06T23:46:08Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8681519",
Trappu/Magnum-Picaro-0.7-v2-12b_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 12.248,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-12T17:04:44Z",
   "model_type": "\ud83e\udd1d : \ud83e\udd1d base merges and moerges",
   "job_id": "8749941",
VIRNECT/llama-3-Korean-8B-r-v-0.1_eval_request_False_float16_Adapter.json CHANGED
@@ -6,7 +6,7 @@
   "params": 16.061,
   "architectures": "?",
   "weight_type": "Adapter",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-07-18T04:50:16Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8749955",
Weyaxi/Bagel-Hermes-34B-Slerp_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 34.389,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-08-30T04:16:05Z",
   "model_type": "\ud83e\udd1d : \ud83e\udd1d base merges and moerges",
   "job_id": "8461028",
aaditya/Llama3-OpenBioLLM-70B_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 70.0,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-08-30T04:19:03Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8504354",
abdullahriaz/llama3.1-med-bot_eval_request_False_4bit_Adapter.json CHANGED
@@ -6,7 +6,7 @@
   "params": 9.3,
   "architectures": "?",
   "weight_type": "Adapter",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-08-28T13:58:28Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8747833",
abhishek/autotrain-vr4a1-e5mms_eval_request_False_float16_Adapter.json CHANGED
@@ -6,7 +6,7 @@
   "params": 16.061,
   "architectures": "?",
   "weight_type": "Adapter",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-06T10:10:48Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8748200",
acbdkk/SupaMATH2_eval_request_False_4bit_Adapter.json CHANGED
@@ -6,7 +6,7 @@
   "params": 4.455,
   "architectures": "?",
   "weight_type": "Adapter",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-07-13T17:42:41Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8747390",
allknowingroger/Chocolatine-24B_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 24.184,
   "architectures": "Phi3ForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-02T19:27:06Z",
   "model_type": "\ud83e\udd1d : \ud83e\udd1d base merges and moerges",
   "job_id": "8504278",
anthracite-org/magnum-v2-12b_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 12.248,
   "architectures": "MistralForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-05T07:16:20Z",
   "model_type": "\ud83d\udcac : \ud83d\udcac chat models (RLHF, DPO, IFT, ...)",
   "job_id": "8653963",
anthracite-org/magnum-v3-34b_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 34.389,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-05T07:17:28Z",
   "model_type": "\ud83d\udcac : \ud83d\udcac chat models (RLHF, DPO, IFT, ...)",
   "job_id": "8653968",
anthracite-org/magnum-v3-34b_eval_request_False_float16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 34.389,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-13T00:58:30Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8752343",
carsenk/flippa-v6_eval_request_False_float16_Adapter.json CHANGED
@@ -6,7 +6,7 @@
   "params": 16.061,
   "architectures": "?",
   "weight_type": "Adapter",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-08-24T22:48:00Z",
   "model_type": "\ud83d\udd36 : \ud83d\udd36 fine-tuned on domain-specific datasets",
   "job_id": "8747857",
djuna/L3.1-Suze-Vume-calc_eval_request_False_bfloat16_Original.json CHANGED
@@ -6,7 +6,7 @@
   "params": 8.03,
   "architectures": "LlamaForCausalLM",
   "weight_type": "Original",
-  "status": "FAILED",
+  "status": "PENDING",
   "submitted_time": "2024-09-04T23:18:01Z",
   "model_type": "\ud83e\udd1d : \ud83e\udd1d base merges and moerges",
   "job_id": "8653559",