| _leaderboard (stringclasses, 1 value) | _developer (stringclasses, 559 values) | _model (stringlengths 9-102) | _uuid (stringlengths 36-36) | schema_version (stringclasses, 1 value) | evaluation_id (stringlengths 35-133) | retrieved_timestamp (stringlengths 13-18) | source_data (stringclasses, 1 value) | evaluation_source_name (stringclasses, 1 value) | evaluation_source_type (stringclasses, 1 value) | source_organization_name (stringclasses, 1 value) | source_organization_url (null) | source_organization_logo_url (null) | evaluator_relationship (stringclasses, 1 value) | model_name (stringlengths 4-102) | model_id (stringlengths 9-102) | model_developer (stringclasses, 559 values) | model_inference_platform (stringclasses, 1 value) | evaluation_results (stringlengths 1.35k-1.41k) | additional_details (stringclasses, 660 values) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| HF Open LLM v2 | sometimesanotion | sometimesanotion/ChocoTrio-14B-v1 | c2034822-689f-4e8b-9575-b63081584aec | 0.0.1 | hfopenllm_v2/sometimesanotion_ChocoTrio-14B-v1/1762652580.518315 | 1762652580.518315 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/ChocoTrio-14B-v1 | sometimesanotion/ChocoTrio-14B-v1 | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7088912973133508}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6505840125855428}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3972809667673716}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3850671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4820520833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5369847074468085}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/LamarckInfusion-14B-v2-hi | e4b943ea-3e97-490b-af6d-ad7dc0fdf012 | 0.0.1 | hfopenllm_v2/sometimesanotion_LamarckInfusion-14B-v2-hi/1762652580.521555 | 1762652580.521556 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/LamarckInfusion-14B-v2-hi | sometimesanotion/LamarckInfusion-14B-v2-hi | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.685485622592499}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6555026541798943}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4229607250755287}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3884228187919463}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48471875000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5404753989361702}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/LamarckInfusion-14B-v3 | 8fe84e89-c582-44d0-b961-d6ed4d889193 | 0.0.1 | hfopenllm_v2/sometimesanotion_LamarckInfusion-14B-v3/1762652580.5219798 | 1762652580.5219798 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/LamarckInfusion-14B-v3 | sometimesanotion/LamarckInfusion-14B-v3 | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7131378076836128}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6517667892516962}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4123867069486405}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38674496644295303}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48202083333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5407247340425532}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/lamarck-14b-reason-model_stock | ee7d14c9-aa49-49df-99fc-057e7dae251f | 0.0.1 | hfopenllm_v2/sometimesanotion_lamarck-14b-reason-model_stock/1762652580.531434 | 1762652580.531434 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/lamarck-14b-reason-model_stock | sometimesanotion/lamarck-14b-reason-model_stock | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49646715160219335}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6568898541408251}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3580060422960725}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38422818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47408333333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5402260638297872}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/Lamarck-14B-v0.6 | dd7005a5-281d-42e9-9916-663b1641718f | 0.0.1 | hfopenllm_v2/sometimesanotion_Lamarck-14B-v0.6/1762652580.519876 | 1762652580.519876 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/Lamarck-14B-v0.6 | sometimesanotion/Lamarck-14B-v0.6 | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6972510716011294}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6460312233782931}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4040785498489426}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38926174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4846875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5399767287234043}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/IF-reasoning-experiment-80 | b1097c42-10fe-4892-8e85-60385ecf35bf | 0.0.1 | hfopenllm_v2/sometimesanotion_IF-reasoning-experiment-80/1762652580.5187662 | 1762652580.518767 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/IF-reasoning-experiment-80 | sometimesanotion/IF-reasoning-experiment-80 | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5462761029623622}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42103836132239286}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09894259818731117}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28439597315436244}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5024583333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3367686170212766}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.383} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/LamarckInfusion-14B-v1 | e7577048-db59-4629-aeb0-f50b72cbb827 | 0.0.1 | hfopenllm_v2/sometimesanotion_LamarckInfusion-14B-v1/1762652580.521131 | 1762652580.521132 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/LamarckInfusion-14B-v1 | sometimesanotion/LamarckInfusion-14B-v1 | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7198322672730577}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6539252513912222}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4169184290030212}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39093959731543626}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48989583333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5376496010638298}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/Lamarck-14B-v0.6-model_stock | 92d4d9ca-d19f-45c5-b506-5b1039100c92 | 0.0.1 | hfopenllm_v2/sometimesanotion_Lamarck-14B-v0.6-model_stock/1762652580.520298 | 1762652580.520299 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/Lamarck-14B-v0.6-model_stock | sometimesanotion/Lamarck-14B-v0.6-model_stock | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6789662539838739}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6269436532753222}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4244712990936556}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38422818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.50065625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.519780585106383}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.0} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/Lamarck-14B-v0.7-rc4 | b3b9b1a5-4495-4649-9943-58986d94fcb1 | 0.0.1 | hfopenllm_v2/sometimesanotion_Lamarck-14B-v0.7-rc4/1762652580.520921 | 1762652580.5209222 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/Lamarck-14B-v0.7-rc4 | sometimesanotion/Lamarck-14B-v0.7-rc4 | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7210811757248545}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6509652911243554}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4025679758308157}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38926174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4911979166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5399767287234043}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/Lamarck-14B-v0.7-Fusion | 480b1187-5f66-4414-84b1-4c6ce1ebf137 | 0.0.1 | hfopenllm_v2/sometimesanotion_Lamarck-14B-v0.7-Fusion/1762652580.52051 | 1762652580.520511 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/Lamarck-14B-v0.7-Fusion | sometimesanotion/Lamarck-14B-v0.7-Fusion | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6821134589555713}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6543636625652262}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4040785498489426}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.401006711409396}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49913541666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5390625}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/IF-reasoning-experiment-40 | 162b8329-ad84-463b-bda7-7383edda04d8 | 0.0.1 | hfopenllm_v2/sometimesanotion_IF-reasoning-experiment-40/1762652580.518558 | 1762652580.518559 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/IF-reasoning-experiment-40 | sometimesanotion/IF-reasoning-experiment-40 | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6329793835910938}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6111859401994667}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3716012084592145}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3800335570469799}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5194166666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5024933510638298}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.0} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/LamarckInfusion-14B-v2 | 95f82b68-6135-4d7d-a2f8-b589d4041776 | 0.0.1 | hfopenllm_v2/sometimesanotion_LamarckInfusion-14B-v2/1762652580.521342 | 1762652580.521342 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/LamarckInfusion-14B-v2 | sometimesanotion/LamarckInfusion-14B-v2 | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6811892445378263}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6564434429766982}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.438821752265861}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3875838926174497}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4992604166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5416389627659575}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/KytheraMix-7B-v0.2 | c50f0ef7-18e4-4f03-8262-ee1519c59b7f | 0.0.1 | hfopenllm_v2/sometimesanotion_KytheraMix-7B-v0.2/1762652580.5189881 | 1762652580.5189881 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/KytheraMix-7B-v0.2 | sometimesanotion/KytheraMix-7B-v0.2 | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6128705168951715}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5635202746804572}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29229607250755285}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33557046979865773}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45941666666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45054853723404253}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
| HF Open LLM v2 | sometimesanotion | sometimesanotion/LamarckInfusion-14B-v2-lo | 57084771-cc66-485c-99ca-470556e14c1b | 0.0.1 | hfopenllm_v2/sometimesanotion_LamarckInfusion-14B-v2-lo/1762652580.52177 | 1762652580.521771 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | sometimesanotion/LamarckInfusion-14B-v2-lo | sometimesanotion/LamarckInfusion-14B-v2-lo | sometimesanotion | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6787911630030541}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6528441920403686}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42371601208459214}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3859060402684564}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4991041666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5397273936170213}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
| HF Open LLM v2 | lordjia | lordjia/Qwen2-Cantonese-7B-Instruct | 869339ec-939c-4222-b178-533c3ca5b0d1 | 0.0.1 | hfopenllm_v2/lordjia_Qwen2-Cantonese-7B-Instruct/1762652580.3277462 | 1762652580.3277462 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | lordjia/Qwen2-Cantonese-7B-Instruct | lordjia/Qwen2-Cantonese-7B-Instruct | lordjia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5435278394659503}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5215311346221223}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25604229607250756}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2953020134228188}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40038541666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38430851063829785}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
| HF Open LLM v2 | lordjia | lordjia/Llama-3-Cantonese-8B-Instruct | f453cb41-346c-48b4-a660-64f13ec69fe4 | 0.0.1 | hfopenllm_v2/lordjia_Llama-3-Cantonese-8B-Instruct/1762652580.3274932 | 1762652580.3274932 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | lordjia/Llama-3-Cantonese-8B-Instruct | lordjia/Llama-3-Cantonese-8B-Instruct | lordjia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6669259786256023}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4814148018954038}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0891238670694864}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40460416666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35147938829787234}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
| HF Open LLM v2 | v000000 | v000000/L3-8B-Stheno-v3.2-abliterated | 33146dbb-8233-4f3d-9fd9-68cbacc3f293 | 0.0.1 | hfopenllm_v2/v000000_L3-8B-Stheno-v3.2-abliterated/1762652580.584157 | 1762652580.584158 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | v000000/L3-8B-Stheno-v3.2-abliterated | v000000/L3-8B-Stheno-v3.2-abliterated | v000000 | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6717720093795574}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5141439214918061}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06948640483383686}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30956375838926176}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36196875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3603723404255319}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
| HF Open LLM v2 | v000000 | v000000/L3.1-Storniitova-8B | 761f0cc0-c202-490d-93b4-447244f1e40a | 0.0.1 | hfopenllm_v2/v000000_L3.1-Storniitova-8B/1762652580.584696 | 1762652580.584697 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | v000000/L3.1-Storniitova-8B | v000000/L3.1-Storniitova-8B | v000000 | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7816560060639104}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5151452004311876}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14652567975830816}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28942953020134227}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4028958333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37757646276595747}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
| HF Open LLM v2 | v000000 | v000000/L3.1-Niitorm-8B-DPO-t0.0001 | d90cef97-1e73-4068-bcb5-260a3f2586fe | 0.0.1 | hfopenllm_v2/v000000_L3.1-Niitorm-8B-DPO-t0.0001/1762652580.5844421 | 1762652580.5844429 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | v000000/L3.1-Niitorm-8B-DPO-t0.0001 | v000000/L3.1-Niitorm-8B-DPO-t0.0001 | v000000 | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7688666072687137}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5134234526726582}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1623867069486405}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29446308724832215}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3879791666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38663563829787234}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
| HF Open LLM v2 | v000000 | v000000/Qwen2.5-14B-Gutenberg-Instruct-Slerpeno | 1f1da15c-3a82-4dfb-9b73-4381c70eb1ef | 0.0.1 | hfopenllm_v2/v000000_Qwen2.5-14B-Gutenberg-Instruct-Slerpeno/1762652580.585153 | 1762652580.585153 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | v000000/Qwen2.5-14B-Gutenberg-Instruct-Slerpeno | v000000/Qwen2.5-14B-Gutenberg-Instruct-Slerpeno | v000000 | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8197493760998595}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.639010174859259}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5324773413897281}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3313758389261745}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4113645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4923537234042553}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.77} |
| HF Open LLM v2 | open-thoughts | open-thoughts/OpenThinker-7B | feb0d715-d1bc-4b0e-8585-a0646c07244b | 0.0.1 | hfopenllm_v2/open-thoughts_OpenThinker-7B/1762652580.4290519 | 1762652580.4290528 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | open-thoughts/OpenThinker-7B | open-thoughts/OpenThinker-7B | open-thoughts | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4088895242401273}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5342727589615611}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4259818731117825}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25671140939597314}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38199999999999995}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41647273936170215}}] | {"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
| HF Open LLM v2 | MultivexAI | MultivexAI/Phi-3.5-Mini-Instruct-MultiVex-v0.25-GGUF | c14766b4-5339-4c6e-87d9-fc2bb953e176 | 0.0.1 | hfopenllm_v2/MultivexAI_Phi-3.5-Mini-Instruct-MultiVex-v0.25-GGUF/1762652579.764051 | 1762652579.764052 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | MultivexAI/Phi-3.5-Mini-Instruct-MultiVex-v0.25-GGUF | MultivexAI/Phi-3.5-Mini-Instruct-MultiVex-v0.25-GGUF | MultivexAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14398241111362298}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29077474506950557}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.006042296072507553}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2550335570469799}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3641979166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11087101063829788}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.821} |
| HF Open LLM v2 | MultivexAI | MultivexAI/Gladiator-Mini-Exp-1221-3B-Instruct-V2 | a152be8c-a542-4a73-8164-a43e1f04c595 | 0.0.1 | hfopenllm_v2/MultivexAI_Gladiator-Mini-Exp-1221-3B-Instruct-V2/1762652579.763629 | 1762652579.7636302 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | MultivexAI/Gladiator-Mini-Exp-1221-3B-Instruct-V2 | MultivexAI/Gladiator-Mini-Exp-1221-3B-Instruct-V2 | MultivexAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6215386286165153}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.438883390990549}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14123867069486404}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2634228187919463}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30082291666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3025265957446808}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.213} |
| HF Open LLM v2 | MultivexAI | MultivexAI/Gladiator-Mini-Exp-1222-3B-Instruct | 990d6877-4045-49ef-ae23-f5a6302185d6 | 0.0.1 | hfopenllm_v2/MultivexAI_Gladiator-Mini-Exp-1222-3B-Instruct/1762652579.763836 | 1762652579.7638369 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | MultivexAI/Gladiator-Mini-Exp-1222-3B-Instruct | MultivexAI/Gladiator-Mini-Exp-1222-3B-Instruct | MultivexAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6163180361440976}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4373182371021645}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14123867069486404}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2634228187919463}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31276041666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30169547872340424}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.213} |
| HF Open LLM v2 | MultivexAI | MultivexAI/Gladiator-Mini-Exp-1211-3B | 2cc4a013-ff0c-44b0-b2e1-66e103606e12 | 0.0.1 | hfopenllm_v2/MultivexAI_Gladiator-Mini-Exp-1211-3B/1762652579.763158 | 1762652579.763159 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | MultivexAI/Gladiator-Mini-Exp-1211-3B | MultivexAI/Gladiator-Mini-Exp-1211-3B | MultivexAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.68760887777763}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44843752663028075}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13746223564954682}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2726510067114094}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.326}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3151595744680851}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.213} |
| HF Open LLM v2 | MultivexAI | MultivexAI/Gladiator-Mini-Exp-1221-3B-Instruct | ebfb99cd-9672-4c30-9540-46e4035a0d43 | 0.0.1 | hfopenllm_v2/MultivexAI_Gladiator-Mini-Exp-1221-3B-Instruct/1762652579.763424 | 1762652579.763425 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | MultivexAI/Gladiator-Mini-Exp-1221-3B-Instruct | MultivexAI/Gladiator-Mini-Exp-1221-3B-Instruct | MultivexAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6078748830879843}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4369766992416903}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1351963746223565}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2634228187919463}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31145833333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3048537234042553}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.213} |
| HF Open LLM v2 | allenai | allenai/OLMo-1.7-7B-hf | 5d7caae7-0242-4a5d-b3be-c677b958d130 | 0.0.1 | hfopenllm_v2/allenai_OLMo-1.7-7B-hf/1762652579.9836009 | 1762652579.9836018 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allenai/OLMo-1.7-7B-hf | allenai/OLMo-1.7-7B-hf | allenai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1568970332052288}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3013695911207614}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0022658610271903325}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2550335570469799}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34748958333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11236702127659574}}] | {"precision": "float16", "architecture": "Unknown", "params_billions": 0.0} |
| HF Open LLM v2 | allenai | allenai/OLMo-7B-hf | 6308f97d-aecd-467a-91f0-5a1650ccc22a | 0.0.1 | hfopenllm_v2/allenai_OLMo-7B-hf/1762652579.984753 | 1762652579.984753 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allenai/OLMo-7B-hf | allenai/OLMo-7B-hf | allenai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2719273749207658}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32791316587362274}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.012084592145015106}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2726510067114094}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3486666666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11727061170212766}}] | {"precision": "bfloat16", "architecture": "OlmoForCausalLM", "params_billions": 6.888} |
| HF Open LLM v2 | allenai | allenai/OLMoE-1B-7B-0924-Instruct | a580b690-0829-43b9-8d52-6dd226208901 | 0.0.1 | hfopenllm_v2/allenai_OLMoE-1B-7B-0924-Instruct/1762652579.98542 | 1762652579.98542 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allenai/OLMoE-1B-7B-0924-Instruct | allenai/OLMoE-1B-7B-0924-Instruct | allenai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4667415790103592}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3901610626816106}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.027945619335347432}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2676174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3848229166666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18758311170212766}}] | {"precision": "bfloat16", "architecture": "OlmoeForCausalLM", "params_billions": 6.919} |
HF Open LLM v2
|
allenai
|
allenai/Llama-3.1-Tulu-3-8B-DPO
|
81bd1edf-be5b-4ae6-a2cc-723aaa040eb9
|
0.0.1
|
hfopenllm_v2/allenai_Llama-3.1-Tulu-3-8B-DPO/1762652579.9829278
|
1762652579.982929
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
allenai/Llama-3.1-Tulu-3-8B-DPO
|
allenai/Llama-3.1-Tulu-3-8B-DPO
|
allenai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8029384255996312}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4079428557044153}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.236404833836858}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41613541666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2898105053191489}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.0}
|
HF Open LLM v2
|
allenai
|
allenai/OLMoE-1B-7B-0924
|
af1bb542-77cb-47e2-89f1-16cc91e89452
|
0.0.1
|
hfopenllm_v2/allenai_OLMoE-1B-7B-0924/1762652579.985209
|
1762652579.9852102
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
allenai/OLMoE-1B-7B-0924
|
allenai/OLMoE-1B-7B-0924
|
allenai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21847143357402804}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3393437931177341}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01661631419939577}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24748322147651006}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34879166666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1739527925531915}}]
|
{"precision": "bfloat16", "architecture": "OlmoeForCausalLM", "params_billions": 6.919}
|
HF Open LLM v2
|
allenai
|
allenai/OLMo-1B-hf
|
d13f5416-1d95-431b-8f01-b969066ec960
|
0.0.1
|
hfopenllm_v2/allenai_OLMo-1B-hf/1762652579.983823
|
1762652579.983823
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
allenai/OLMo-1B-hf
|
allenai/OLMo-1B-hf
|
allenai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21819660722438686}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30519468988429327}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.017371601208459216}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26174496644295303}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40978125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11735372340425532}}]
|
{"precision": "bfloat16", "architecture": "OlmoForCausalLM", "params_billions": 1.177}
|
HF Open LLM v2
|
allenai
|
allenai/Llama-3.1-Tulu-3-8B-SFT
|
35674acb-a68c-4ac1-9aac-ac9cb44801e6
|
0.0.1
|
hfopenllm_v2/allenai_Llama-3.1-Tulu-3-8B-SFT/1762652579.983397
|
1762652579.983398
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
allenai/Llama-3.1-Tulu-3-8B-SFT
|
allenai/Llama-3.1-Tulu-3-8B-SFT
|
allenai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7403400754442657}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3871863270501647}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11782477341389729}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27768456375838924}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4267708333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28116688829787234}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
allenai
|
allenai/OLMo-7B-Instruct-hf
|
7ff78ffd-c934-4a17-b30d-2d8267f3e25a
|
0.0.1
|
hfopenllm_v2/allenai_OLMo-7B-Instruct-hf/1762652579.98445
|
1762652579.984452
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
allenai/OLMo-7B-Instruct-hf
|
allenai/OLMo-7B-Instruct-hf
|
allenai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3472652561869174}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3706469866662716}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2709731543624161}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37647916666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17852393617021275}}]
|
{"precision": "bfloat16", "architecture": "OlmoForCausalLM", "params_billions": 7.0}
|
HF Open LLM v2
|
allenai
|
allenai/Llama-3.1-Tulu-3-70B-SFT
|
6921281e-5756-4f0d-a37c-3b05ff6b2703
|
0.0.1
|
hfopenllm_v2/allenai_Llama-3.1-Tulu-3-70B-SFT/1762652579.982346
|
1762652579.982346
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
allenai/Llama-3.1-Tulu-3-70B-SFT
|
allenai/Llama-3.1-Tulu-3-70B-SFT
|
allenai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8050616807847621}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5951437800580934}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33157099697885195}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3447986577181208}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5026145833333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46243351063829785}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
|
HF Open LLM v2
|
allenai
|
allenai/Llama-3.1-Tulu-3-70B-DPO
|
b790e9c5-2412-4aa0-a975-37b8662a82cf
|
0.0.1
|
hfopenllm_v2/allenai_Llama-3.1-Tulu-3-70B-DPO/1762652579.9821
|
1762652579.982101
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
allenai/Llama-3.1-Tulu-3-70B-DPO
|
allenai/Llama-3.1-Tulu-3-70B-DPO
|
allenai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8281925291559729}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6146203626958501}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44939577039274925}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37583892617449666}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4922604166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4632646276595745}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.0}
|
HF Open LLM v2
|
allenai
|
allenai/OLMo-2-1124-7B-Instruct
|
17df660f-6a91-476f-a7e8-7169eef1c24d
|
0.0.1
|
hfopenllm_v2/allenai_OLMo-2-1124-7B-Instruct/1762652579.9840362
|
1762652579.9840372
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
allenai/OLMo-2-1124-7B-Instruct
|
allenai/OLMo-2-1124-7B-Instruct
|
allenai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7244034716773715}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40223602474417786}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1487915407854985}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2785234899328859}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35083333333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2672041223404255}}]
|
{"precision": "float16", "architecture": "Olmo2ForCausalLM", "params_billions": 7.299}
|
HF Open LLM v2
|
allenai
|
allenai/OLMoE-1B-7B-0125-Instruct
|
af176c4c-b06f-44ac-bcba-1331d9148958
|
0.0.1
|
hfopenllm_v2/allenai_OLMoE-1B-7B-0125-Instruct/1762652579.984983
|
1762652579.984983
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
allenai/OLMoE-1B-7B-0125-Instruct
|
allenai/OLMoE-1B-7B-0125-Instruct
|
allenai
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6757436934001781}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38245348916008676}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08987915407854985}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2600671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3635833333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19148936170212766}}]
|
{"precision": "float16", "architecture": "OlmoeForCausalLM", "params_billions": 6.919}
|
HF Open LLM v2
|
FINGU-AI
|
FINGU-AI/Ultimos-32B
|
fa69d78a-e112-45ff-80c3-b4eb30d83ed9
|
0.0.1
|
hfopenllm_v2/FINGU-AI_Ultimos-32B/1762652579.617578
|
1762652579.617579
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
FINGU-AI/Ultimos-32B
|
FINGU-AI/Ultimos-32B
|
FINGU-AI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1592197591280026}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2905531373728777}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24916107382550334}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32860416666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11112034574468085}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 9.604}
|
HF Open LLM v2
|
FINGU-AI
|
FINGU-AI/RomboUltima-32B
|
65c5a05d-0b24-4767-88ff-24984fa0f988
|
0.0.1
|
hfopenllm_v2/FINGU-AI_RomboUltima-32B/1762652579.6173398
|
1762652579.617341
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
FINGU-AI/RomboUltima-32B
|
FINGU-AI/RomboUltima-32B
|
FINGU-AI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6671509372908327}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6938448333620042}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5385196374622356}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3716442953020134}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4836354166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.578873005319149}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 17.645}
|
HF Open LLM v2
|
FINGU-AI
|
FINGU-AI/L3-8B
|
f2a0c2ff-40a4-4a75-93ca-b611c4314dd5
|
0.0.1
|
hfopenllm_v2/FINGU-AI_L3-8B/1762652579.615993
|
1762652579.615993
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
FINGU-AI/L3-8B
|
FINGU-AI/L3-8B
|
FINGU-AI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7517309627344335}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4985585187130108}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2545317220543807}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2953020134228188}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38283333333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36394614361702127}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
FINGU-AI
|
FINGU-AI/Q-Small-3B
|
11d9d5ea-29f2-412e-af48-858626ebeec5
|
0.0.1
|
hfopenllm_v2/FINGU-AI_Q-Small-3B/1762652579.616768
|
1762652579.61677
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
FINGU-AI/Q-Small-3B
|
FINGU-AI/Q-Small-3B
|
FINGU-AI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4145345461154182}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43185314557630744}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08308157099697885}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26677852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40054166666666663}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27900598404255317}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 3.086}
|
HF Open LLM v2
|
FINGU-AI
|
FINGU-AI/QwQ-Buddy-32B-Alpha
|
32836e5d-d413-4e40-8c9c-4cb8c3daa23a
|
0.0.1
|
hfopenllm_v2/FINGU-AI_QwQ-Buddy-32B-Alpha/1762652579.617035
|
1762652579.617036
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
FINGU-AI/QwQ-Buddy-32B-Alpha
|
FINGU-AI/QwQ-Buddy-32B-Alpha
|
FINGU-AI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34464221598691475}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.642442234274039}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3851963746223565}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37919463087248323}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5059895833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5294215425531915}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 19.662}
|
HF Open LLM v2
|
FINGU-AI
|
FINGU-AI/Chocolatine-Fusion-14B
|
5d5a7561-8a41-48ea-ae1c-e986ac666f19
|
0.0.1
|
hfopenllm_v2/FINGU-AI_Chocolatine-Fusion-14B/1762652579.615752
|
1762652579.615752
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
FINGU-AI/Chocolatine-Fusion-14B
|
FINGU-AI/Chocolatine-Fusion-14B
|
FINGU-AI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6949028577507679}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.64132285324613}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3851963746223565}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3716442953020134}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49402083333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5261801861702128}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 8.367}
|
HF Open LLM v2
|
nhyha
|
nhyha/merge_Qwen2.5-7B-Instruct_20241023_0314
|
eb608d79-545a-4cc2-8d28-e539a3af7f17
|
0.0.1
|
hfopenllm_v2/nhyha_merge_Qwen2.5-7B-Instruct_20241023_0314/1762652580.406431
|
1762652580.406431
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nhyha/merge_Qwen2.5-7B-Instruct_20241023_0314
|
nhyha/merge_Qwen2.5-7B-Instruct_20241023_0314
|
nhyha
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5694568190179834}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5558529241660143}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3542296072507553}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3213087248322148}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42506249999999995}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45420545212765956}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
nhyha
|
nhyha/N3N_Delirium-v1_1030_0227
|
5128233e-41be-4e26-9ec2-2b7926c66b7c
|
0.0.1
|
hfopenllm_v2/nhyha_N3N_Delirium-v1_1030_0227/1762652580.4055
|
1762652580.4055
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nhyha/N3N_Delirium-v1_1030_0227
|
nhyha/N3N_Delirium-v1_1030_0227
|
nhyha
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8022890375315275}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5890686677822234}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2107250755287009}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.337248322147651}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40981249999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41497672872340424}}]
|
{"precision": "bfloat16", "architecture": "Gemma2ForCausalLM", "params_billions": 10.159}
|
HF Open LLM v2
|
nhyha
|
nhyha/N3N_Llama-3.1-8B-Instruct_1028_0216
|
928f9cd0-ce0f-43f7-aa5f-be9cbf4d91cd
|
0.0.1
|
hfopenllm_v2/nhyha_N3N_Llama-3.1-8B-Instruct_1028_0216/1762652580.405756
|
1762652580.405757
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nhyha/N3N_Llama-3.1-8B-Instruct_1028_0216
|
nhyha/N3N_Llama-3.1-8B-Instruct_1028_0216
|
nhyha
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4796063334175543}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5053741309920361}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17069486404833836}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3062080536912752}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40503125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36377992021276595}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
Corianas
|
Corianas/Quokka_2.7b
|
54015982-408c-469b-86da-6642f5708180
|
0.0.1
|
hfopenllm_v2/Corianas_Quokka_2.7b/1762652579.5120142
|
1762652579.512015
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Corianas/Quokka_2.7b
|
Corianas/Quokka_2.7b
|
Corianas
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17490702447284318}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3055474937424842}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.008308157099697885}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2558724832214765}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3908333333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11452792553191489}}]
|
{"precision": "float16", "architecture": "GPT2LMHeadModel", "params_billions": 2.786}
|
HF Open LLM v2
|
TheHierophant
|
TheHierophant/Underground-Cognitive-V0.3-test
|
872cc338-765c-4291-8b50-77b4bce719fd
|
0.0.1
|
hfopenllm_v2/TheHierophant_Underground-Cognitive-V0.3-test/1762652579.916598
|
1762652579.916598
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
TheHierophant/Underground-Cognitive-V0.3-test
|
TheHierophant/Underground-Cognitive-V0.3-test
|
TheHierophant
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4808297539417634}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5290131900998047}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05891238670694864}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2986577181208054}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43511458333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.331781914893617}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 10.732}
|
HF Open LLM v2
|
intervitens
|
intervitens/mini-magnum-12b-v1.1
|
8ad974e6-8d4c-45bf-86d0-f701cfc323d5
|
0.0.1
|
hfopenllm_v2/intervitens_mini-magnum-12b-v1.1/1762652580.228364
|
1762652580.228365
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
intervitens/mini-magnum-12b-v1.1
|
intervitens/mini-magnum-12b-v1.1
|
intervitens
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5155509603407846}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.506180035650624}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.061933534743202415}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28859060402684567}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4004479166666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3291223404255319}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
tangledgroup
|
tangledgroup/tangled-llama-pints-1.5b-v0.2-instruct
|
3964e579-bb1f-46be-8740-ba8097d8f7ef
|
0.0.1
|
hfopenllm_v2/tangledgroup_tangled-llama-pints-1.5b-v0.2-instruct/1762652580.551594
|
1762652580.551595
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
tangledgroup/tangled-llama-pints-1.5b-v0.2-instruct
|
tangledgroup/tangled-llama-pints-1.5b-v0.2-instruct
|
tangledgroup
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1724092075692496}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3158349391752727}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01283987915407855}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24161073825503357}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3642916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11170212765957446}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.5}
|
HF Open LLM v2
|
tangledgroup
|
tangledgroup/tangled-llama-pints-1.5b-v0.1-instruct
|
727047f6-974d-4980-a8cd-672728885485
|
0.0.1
|
hfopenllm_v2/tangledgroup_tangled-llama-pints-1.5b-v0.1-instruct/1762652580.5513222
|
1762652580.5513222
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
tangledgroup/tangled-llama-pints-1.5b-v0.1-instruct
|
tangledgroup/tangled-llama-pints-1.5b-v0.1-instruct
|
tangledgroup
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15090182936829835}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31434444692284963}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.012084592145015106}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23993288590604026}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37613541666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11087101063829788}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.5}
|
HF Open LLM v2
|
Aashraf995
|
Aashraf995/Creative-7B-nerd
|
7ea9f4db-5b52-40a5-904e-785e43302934
|
0.0.1
|
hfopenllm_v2/Aashraf995_Creative-7B-nerd/1762652579.476046
|
1762652579.476046
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Aashraf995/Creative-7B-nerd
|
Aashraf995/Creative-7B-nerd
|
Aashraf995
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4721871301480073}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5606785565640195}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3164652567975831}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3263422818791946}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4515416666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44921875}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
maldv
|
maldv/Qwentile2.5-32B-Instruct
|
f4fde074-8a05-42ec-884c-447b4bfaba39
|
0.0.1
|
hfopenllm_v2/maldv_Qwentile2.5-32B-Instruct/1762652580.3309162
|
1762652580.3309171
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
maldv/Qwentile2.5-32B-Instruct
|
maldv/Qwentile2.5-32B-Instruct
|
maldv
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7393161256576994}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6962837451098368}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5219033232628398}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38422818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4682291666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5879321808510638}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764}
|
HF Open LLM v2
|
maldv
|
maldv/Lytta2.5-32B-Instruct
|
27575e22-2e66-4177-aa8f-ab4ebd4743ea
|
0.0.1
|
hfopenllm_v2/maldv_Lytta2.5-32B-Instruct/1762652580.3306072
|
1762652580.3306088
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
maldv/Lytta2.5-32B-Instruct
|
maldv/Lytta2.5-32B-Instruct
|
maldv
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25079455843827714}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.559971089357847}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34441087613293053}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26677852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37685416666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5048204787234043}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764}
|
HF Open LLM v2
|
maldv
|
maldv/Awqward2.5-32B-Instruct
|
8b330a87-7689-45ae-a005-0349e09f07ac
|
0.0.1
|
hfopenllm_v2/maldv_Awqward2.5-32B-Instruct/1762652580.3302772
|
1762652580.330278
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
maldv/Awqward2.5-32B-Instruct
|
maldv/Awqward2.5-32B-Instruct
|
maldv
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8254697535871487}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6974465506773041}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6231117824773413}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34060402684563756}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42748958333333337}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5723071808510638}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764}
|
HF Open LLM v2
|
mkxu
|
mkxu/llama-3-8b-instruct-fpo
|
0ba6add2-4495-4261-baab-224c0b6c683f
|
0.0.1
|
hfopenllm_v2/mkxu_llama-3-8b-instruct-fpo/1762652580.366677
|
1762652580.366678
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
mkxu/llama-3-8b-instruct-fpo
|
mkxu/llama-3-8b-instruct-fpo
|
mkxu
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6790161216682846}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4959114413700331}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07326283987915408}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27768456375838924}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36578125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36045545212765956}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
tklohj
|
tklohj/WindyFloLLM
|
53f0c477-6f06-427a-be34-5b0131cbf9e1
|
0.0.1
|
hfopenllm_v2/tklohj_WindyFloLLM/1762652580.573854
|
1762652580.573855
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
tklohj/WindyFloLLM
|
tklohj/WindyFloLLM
|
tklohj
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26685638550158025}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4636616007058791}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.015861027190332326}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2751677852348993}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4253125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25814494680851063}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 13.016}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0.3
|
ccf2d437-d3e3-4a53-9249-e6df2fd04f49
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-2-14B-Instruct-v2.0.3/1762652580.298579
|
1762652580.29858
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0.3
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0.3
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7037205725253439}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6548026688308357}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4206948640483384}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37919463087248323}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47681250000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5374002659574468}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-2-14B-Instruct-DPO-v2.0b1
|
9ae740a8-6d7c-438c-942f-11ac0f6cbe79
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-2-14B-Instruct-DPO-v2.0b1/1762652580.2977622
|
1762652580.297763
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-2-14B-Instruct-DPO-v2.0b1
|
jpacifico/Chocolatine-2-14B-Instruct-DPO-v2.0b1
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10334024831890495}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.669567432054888}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2756797583081571}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37583892617449666}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44673958333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5123836436170213}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Distilucie-7B-Math-Instruct-DPO-v0.1
|
8ea866ce-c4a8-4981-b221-ee7b2dc898cd
|
0.0.1
|
hfopenllm_v2/jpacifico_Distilucie-7B-Math-Instruct-DPO-v0.1/1762652580.300392
|
1762652580.3003929
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Distilucie-7B-Math-Instruct-DPO-v0.1
|
jpacifico/Distilucie-7B-Math-Instruct-DPO-v0.1
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30475028479988653}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38346961466103785}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0256797583081571}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29949664429530204}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3644479166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1809341755319149}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 6.707}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0b2
|
6837502d-0f08-48d8-b85e-70f3e07a2531
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-2-14B-Instruct-v2.0b2/1762652580.298837
|
1762652580.298838
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0b2
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0b2
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7240787776433197}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6475822300543483}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3950151057401813}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38338926174496646}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48075}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5369015957446809}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Lucie-7B-Instruct-Merged-Model_Stock-v1.1
|
03e7b19a-c31a-4bd4-8560-3b8ac4c7c80c
|
0.0.1
|
hfopenllm_v2/jpacifico_Lucie-7B-Instruct-Merged-Model_Stock-v1.1/1762652580.301858
|
1762652580.3018591
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Lucie-7B-Instruct-Merged-Model_Stock-v1.1
|
jpacifico/Lucie-7B-Instruct-Merged-Model_Stock-v1.1
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30142798884736943}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38078615414710804}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.027945619335347432}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2827181208053691}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37502083333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18617021276595744}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 6.707}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-14B-Instruct-4k-DPO
|
fe0cfe19-b019-459e-a71d-46d55612a95e
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-14B-Instruct-4k-DPO/1762652580.296761
|
1762652580.2967622
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-14B-Instruct-4k-DPO
|
jpacifico/Chocolatine-14B-Instruct-4k-DPO
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4688648341954902}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6299582409761587}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1782477341389728}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3414429530201342}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44388541666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4763962765957447}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 13.96}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-3B-Instruct-DPO-Revised
|
08a646ba-9b4a-483e-8adf-f4e203a9be5d
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-3B-Instruct-DPO-Revised/1762652580.299312
|
1762652580.299314
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-3B-Instruct-DPO-Revised
|
jpacifico/Chocolatine-3B-Instruct-DPO-Revised
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5622625744136669}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5539982344792619}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18051359516616314}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3221476510067114}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44534375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3988530585106383}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-3B-Instruct-DPO-v1.0
|
7f969b69-cb14-4291-a15f-60f2b56e23ad
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-3B-Instruct-DPO-v1.0/1762652580.29967
|
1762652580.299671
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-3B-Instruct-DPO-v1.0
|
jpacifico/Chocolatine-3B-Instruct-DPO-v1.0
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3737184005106451}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5471398082537478}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1782477341389728}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31543624161073824}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4754791666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3937001329787234}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Lucie-7B-Instruct-DPO-v1.1
|
ad0aa0da-dac4-42a9-ae62-ebe03aa40643
|
0.0.1
|
hfopenllm_v2/jpacifico_Lucie-7B-Instruct-DPO-v1.1/1762652580.300676
|
1762652580.300677
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Lucie-7B-Instruct-DPO-v1.1
|
jpacifico/Lucie-7B-Instruct-DPO-v1.1
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31209413245743517}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37810118011411814}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.023413897280966767}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.287751677852349}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40159374999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18375997340425532}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 6.707}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0b3
|
f345f9cb-7233-4f4e-8e8b-a0b607502d1d
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-2-14B-Instruct-v2.0b3/1762652580.2990808
|
1762652580.299082
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0b3
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0b3
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7322969720342026}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.646878884179919}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4108761329305136}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37919463087248323}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47811458333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5337433510638298}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0
|
85b8aede-7eb3-4997-9529-2f7d4603fb9e
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-2-14B-Instruct-v2.0/1762652580.2980192
|
1762652580.2980192
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0885273297073986}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6769929749559443}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48036253776435045}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3875838926174497}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5021145833333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5301695478723404}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0.1
|
c68ca8a7-07d8-4295-a535-a573fc3893b7
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-2-14B-Instruct-v2.0.1/1762652580.298285
|
1762652580.2982872
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0.1
|
jpacifico/Chocolatine-2-14B-Instruct-v2.0.1
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07421419611076388}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6736278064166185}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.479607250755287}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39177852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.50075}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5299202127659575}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-3B-Instruct-DPO-v1.2
|
f34988e6-20f5-4d77-9233-70d5bc6193fb
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-3B-Instruct-DPO-v1.2/1762652580.300061
|
1762652580.300063
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-3B-Instruct-DPO-v1.2
|
jpacifico/Chocolatine-3B-Instruct-DPO-v1.2
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5455014915978493}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5487182027245813}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.20468277945619334}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3389261744966443}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41542708333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3877160904255319}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Lucie-Boosted-7B-Instruct
|
4c7575d2-d538-4767-8d7e-d905b11f84f9
|
0.0.1
|
hfopenllm_v2/jpacifico_Lucie-Boosted-7B-Instruct/1762652580.302166
|
1762652580.3021681
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Lucie-Boosted-7B-Instruct
|
jpacifico/Lucie-Boosted-7B-Instruct
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25661467129438775}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34654827210803724}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01283987915407855}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26677852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.369875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1629820478723404}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 6.707}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Lucie-7B-Instruct-DPO-v1.1.3
|
643a510c-b9f4-4222-a1b0-09d7d5434de8
|
0.0.1
|
hfopenllm_v2/jpacifico_Lucie-7B-Instruct-DPO-v1.1.3/1762652580.3010209
|
1762652580.301022
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Lucie-7B-Instruct-DPO-v1.1.3
|
jpacifico/Lucie-7B-Instruct-DPO-v1.1.3
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3044754584502453}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.381900181819828}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.02416918429003021}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2860738255033557}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38178124999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1763630319148936}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 6.707}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-14B-Instruct-DPO-v1.3
|
b56c681a-592f-491a-aa0a-030848356563
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-14B-Instruct-DPO-v1.3/1762652580.2973812
|
1762652580.297384
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-14B-Instruct-DPO-v1.3
|
jpacifico/Chocolatine-14B-Instruct-DPO-v1.3
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.703995398874985}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6846125547592651}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5619335347432024}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3414429530201342}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42339583333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5374002659574468}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 14.66}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Lucie-7B-Instruct-Merged-Model_Stock-v1.0
|
f28fc4d7-d3eb-4915-967a-db97667e85bc
|
0.0.1
|
hfopenllm_v2/jpacifico_Lucie-7B-Instruct-Merged-Model_Stock-v1.0/1762652580.3014882
|
1762652580.3014889
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Lucie-7B-Instruct-Merged-Model_Stock-v1.0
|
jpacifico/Lucie-7B-Instruct-Merged-Model_Stock-v1.0
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32335979645119395}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3802022135816421}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.02416918429003021}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28859060402684567}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38438541666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1870844414893617}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 6.707}
|
HF Open LLM v2
|
jpacifico
|
jpacifico/Chocolatine-14B-Instruct-DPO-v1.2
|
aae9e150-7992-4241-91af-0c55d03d709f
|
0.0.1
|
hfopenllm_v2/jpacifico_Chocolatine-14B-Instruct-DPO-v1.2/1762652580.297051
|
1762652580.297052
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
jpacifico/Chocolatine-14B-Instruct-DPO-v1.2
|
jpacifico/Chocolatine-14B-Instruct-DPO-v1.2
|
jpacifico
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6852107962428579}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6438408959901142}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.20921450151057402}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32550335570469796}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4267708333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46966422872340424}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 13.96}
|
HF Open LLM v2
|
wannaphong
|
wannaphong/KhanomTanLLM-Instruct
|
681b02e4-7b57-42b7-9550-59c664511b01
|
0.0.1
|
hfopenllm_v2/wannaphong_KhanomTanLLM-Instruct/1762652580.59218
|
1762652580.59218
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
wannaphong/KhanomTanLLM-Instruct
|
wannaphong/KhanomTanLLM-Instruct
|
wannaphong
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16211762567764643}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30931233392513263}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2634228187919463}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37006249999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1118683510638298}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.447}
|
HF Open LLM v2
|
lesubra
|
lesubra/ECE-PRYMMAL-3B-SLERP_2-V1
|
c6b7d02d-4d2d-43fa-95a8-aa188f38120a
|
0.0.1
|
hfopenllm_v2/lesubra_ECE-PRYMMAL-3B-SLERP_2-V1/1762652580.320611
|
1762652580.3206122
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
lesubra/ECE-PRYMMAL-3B-SLERP_2-V1
|
lesubra/ECE-PRYMMAL-3B-SLERP_2-V1
|
lesubra
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3649006857360692}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5411447467732948}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16767371601208458}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3213087248322148}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4661458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3990192819148936}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
lesubra
|
lesubra/ECE-EIFFEL-3Bv3
|
317a27cd-9458-4157-a304-0c1e3739d0fb
|
0.0.1
|
hfopenllm_v2/lesubra_ECE-EIFFEL-3Bv3/1762652580.319853
|
1762652580.319854
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
lesubra/ECE-EIFFEL-3Bv3
|
lesubra/ECE-EIFFEL-3Bv3
|
lesubra
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3786142989490109}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5469446669064592}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16691842900302115}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3296979865771812}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46751041666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39752327127659576}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
lesubra
|
lesubra/ECE-EIFFEL-3B
|
b32f3852-47ce-4ca5-98a0-5e2f166a11e9
|
0.0.1
|
hfopenllm_v2/lesubra_ECE-EIFFEL-3B/1762652580.319232
|
1762652580.319233
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
lesubra/ECE-EIFFEL-3B
|
lesubra/ECE-EIFFEL-3B
|
lesubra
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3469405621528655}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5101583259186949}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1216012084592145}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3313758389261745}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43622916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3820644946808511}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
lesubra
|
lesubra/ECE-PRYMMAL-3B-SLERP_2-V2
|
653cb458-4616-4325-b377-a79ee4a5d9c6
|
0.0.1
|
hfopenllm_v2/lesubra_ECE-PRYMMAL-3B-SLERP_2-V2/1762652580.320825
|
1762652580.320826
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
lesubra/ECE-PRYMMAL-3B-SLERP_2-V2
|
lesubra/ECE-PRYMMAL-3B-SLERP_2-V2
|
lesubra
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3664244205375071}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5411447467732948}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16767371601208458}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3213087248322148}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4661458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3990192819148936}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
lesubra
|
lesubra/ECE-PRYMMAL-3B-SLERP-V2
|
cb14b942-7c2f-489f-bede-d25279ea39ac
|
0.0.1
|
hfopenllm_v2/lesubra_ECE-PRYMMAL-3B-SLERP-V2/1762652580.320386
|
1762652580.3203871
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
lesubra/ECE-PRYMMAL-3B-SLERP-V2
|
lesubra/ECE-PRYMMAL-3B-SLERP-V2
|
lesubra
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2932840418977203}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5340594627933309}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1661631419939577}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31711409395973156}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45951041666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3900432180851064}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
lesubra
|
lesubra/ECE-PRYMMAL-3B-SLERP-V1
|
6fb1242d-bf20-43e6-acfe-77a88c020eee
|
0.0.1
|
hfopenllm_v2/lesubra_ECE-PRYMMAL-3B-SLERP-V1/1762652580.320159
|
1762652580.32016
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
lesubra/ECE-PRYMMAL-3B-SLERP-V1
|
lesubra/ECE-PRYMMAL-3B-SLERP-V1
|
lesubra
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2932840418977203}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5340594627933309}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1661631419939577}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31711409395973156}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45951041666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3900432180851064}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
lesubra
|
lesubra/ECE-EIFFEL-3Bv2
|
7e511f3b-7d8e-44c4-ad3f-7f6e66231109
|
0.0.1
|
hfopenllm_v2/lesubra_ECE-EIFFEL-3Bv2/1762652580.319594
|
1762652580.319595
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
lesubra/ECE-EIFFEL-3Bv2
|
lesubra/ECE-EIFFEL-3Bv2
|
lesubra
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30130276555096036}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5424007873371969}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11858006042296072}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33557046979865773}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4442916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39993351063829785}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
lesubra
|
lesubra/merge-test
|
6f16b360-346a-4299-8f60-fafc0bb8ebcd
|
0.0.1
|
hfopenllm_v2/lesubra_merge-test/1762652580.321054
|
1762652580.321055
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
lesubra/merge-test
|
lesubra/merge-test
|
lesubra
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.538257379309122}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5240434385320306}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12084592145015106}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3221476510067114}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44190625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38738364361702127}}]
|
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
|
HF Open LLM v2
|
invisietch
|
invisietch/MiS-Firefly-v0.2-22B
|
6df8e489-865f-4692-a673-6abbf2159d1d
|
0.0.1
|
hfopenllm_v2/invisietch_MiS-Firefly-v0.2-22B/1762652580.2300959
|
1762652580.2300968
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
invisietch/MiS-Firefly-v0.2-22B
|
invisietch/MiS-Firefly-v0.2-22B
|
invisietch
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5371082062261466}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5513523591170696}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16540785498489427}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30453020134228187}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46937500000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3620345744680851}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 22.247}
|
HF Open LLM v2
|
invisietch
|
invisietch/EtherealRainbow-v0.2-8B
|
c60869f0-7009-48c9-be41-339335e5ee4e
|
0.0.1
|
hfopenllm_v2/invisietch_EtherealRainbow-v0.2-8B/1762652580.229454
|
1762652580.229455
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
invisietch/EtherealRainbow-v0.2-8B
|
invisietch/EtherealRainbow-v0.2-8B
|
invisietch
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39032988027323057}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5102035205059678}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0823262839879154}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3028523489932886}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38267708333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36527593085106386}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
invisietch
|
invisietch/EtherealRainbow-v0.3-8B
|
cc85ba7f-bbc0-43e7-a678-949fd5be8498
|
0.0.1
|
hfopenllm_v2/invisietch_EtherealRainbow-v0.3-8B/1762652580.229776
|
1762652580.2297769
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
invisietch/EtherealRainbow-v0.3-8B
|
invisietch/EtherealRainbow-v0.3-8B
|
invisietch
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36822298168858625}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5096758454539693}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07628398791540786}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30453020134228187}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39039583333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36261635638297873}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
invisietch
|
invisietch/Nimbus-Miqu-v0.1-70B
|
c36d07f4-b263-4849-86f9-d3fea355c068
|
0.0.1
|
hfopenllm_v2/invisietch_Nimbus-Miqu-v0.1-70B/1762652580.230321
|
1762652580.230322
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
invisietch/Nimbus-Miqu-v0.1-70B
|
invisietch/Nimbus-Miqu-v0.1-70B
|
invisietch
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46466819150963884}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.601030667794844}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06042296072507553}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3389261744966443}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41331249999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3853058510638298}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 68.977}
|
HF Open LLM v2
|
Dans-DiscountModels
|
Dans-DiscountModels/12b-mn-dans-reasoning-test-2
|
bd21f54f-6b0c-4db9-bb46-7a4c60f960ae
|
0.0.1
|
hfopenllm_v2/Dans-DiscountModels_12b-mn-dans-reasoning-test-2/1762652579.534956
|
1762652579.5349572
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Dans-DiscountModels/12b-mn-dans-reasoning-test-2
|
Dans-DiscountModels/12b-mn-dans-reasoning-test-2
|
Dans-DiscountModels
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3710953603106424}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48070333147041405}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0634441087613293}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27348993288590606}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37021875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2507480053191489}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
Dans-DiscountModels
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML-V0.2.0
|
d47dc284-0ed6-4853-8a54-b87b4b529150
|
0.0.1
|
hfopenllm_v2/Dans-DiscountModels_Dans-Instruct-Mix-8b-ChatML-V0.2.0/1762652579.536302
|
1762652579.536303
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML-V0.2.0
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML-V0.2.0
|
Dans-DiscountModels
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5064085515321569}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4624263551503409}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07326283987915408}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3644479166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2999501329787234}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
Dans-DiscountModels
|
Dans-DiscountModels/Dans-Instruct-CoreCurriculum-12b-ChatML
|
6b61018c-249d-482b-a787-06f1e6514f29
|
0.0.1
|
hfopenllm_v2/Dans-DiscountModels_Dans-Instruct-CoreCurriculum-12b-ChatML/1762652579.535429
|
1762652579.53543
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Dans-DiscountModels/Dans-Instruct-CoreCurriculum-12b-ChatML
|
Dans-DiscountModels/Dans-Instruct-CoreCurriculum-12b-ChatML
|
Dans-DiscountModels
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21110209798889168}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4791864789096407}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04305135951661632}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2802013422818792}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3606354166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2805019946808511}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
Dans-DiscountModels
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML-V0.1.0
|
9873b58d-1ffd-44a7-bb93-15038986419a
|
0.0.1
|
hfopenllm_v2/Dans-DiscountModels_Dans-Instruct-Mix-8b-ChatML-V0.1.0/1762652579.5358772
|
1762652579.535878
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML-V0.1.0
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML-V0.1.0
|
Dans-DiscountModels
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06682048076880455}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47747656219777285}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06722054380664652}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2860738255033557}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3785833333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.328374335106383}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
Dans-DiscountModels
|
Dans-DiscountModels/12b-mn-dans-reasoning-test-3
|
c9dedad4-65d4-479e-b465-912cd8885e32
|
0.0.1
|
hfopenllm_v2/Dans-DiscountModels_12b-mn-dans-reasoning-test-3/1762652579.535208
|
1762652579.535209
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Dans-DiscountModels/12b-mn-dans-reasoning-test-3
|
Dans-DiscountModels/12b-mn-dans-reasoning-test-3
|
Dans-DiscountModels
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5052593784491815}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48388753289945696}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07779456193353475}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2709731543624161}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4167604166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2515791223404255}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
Dans-DiscountModels
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML
|
60db255b-d34c-4f33-91a4-279a9ccc6791
|
0.0.1
|
hfopenllm_v2/Dans-DiscountModels_Dans-Instruct-Mix-8b-ChatML/1762652579.5356538
|
1762652579.535655
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML
|
Dans-DiscountModels
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08250774611364513}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4738171816307924}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05513595166163142}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29446308724832215}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3918229166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32878989361702127}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
Dans-DiscountModels
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML-V0.1.1
|
71656625-cd85-49a6-a8df-abc0b9c0ae5d
|
0.0.1
|
hfopenllm_v2/Dans-DiscountModels_Dans-Instruct-Mix-8b-ChatML-V0.1.1/1762652579.5360918
|
1762652579.5360918
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML-V0.1.1
|
Dans-DiscountModels/Dans-Instruct-Mix-8b-ChatML-V0.1.1
|
Dans-DiscountModels
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09105063453857985}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4748653313732898}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05966767371601209}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2911073825503356}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3824895833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.327875664893617}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
mosama
|
mosama/Qwen2.5-1.5B-Instruct-CoT-Reflection
|
e0d9dbcc-8df2-4207-b849-2c4984340605
|
0.0.1
|
hfopenllm_v2/mosama_Qwen2.5-1.5B-Instruct-CoT-Reflection/1762652580.373101
|
1762652580.3731022
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
mosama/Qwen2.5-1.5B-Instruct-CoT-Reflection
|
mosama/Qwen2.5-1.5B-Instruct-CoT-Reflection
|
mosama
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2870394996387363}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41093712633583523}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.027190332326283987}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26174496644295303}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3211979166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26512632978723405}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
TIGER-Lab
|
TIGER-Lab/MAmmoTH2-7B-Plus
|
93503cc0-80aa-44b5-9155-c81cd44a9ac9
|
0.0.1
|
hfopenllm_v2/TIGER-Lab_MAmmoTH2-7B-Plus/1762652579.9110248
|
1762652579.911026
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
TIGER-Lab/MAmmoTH2-7B-Plus
|
TIGER-Lab/MAmmoTH2-7B-Plus
|
TIGER-Lab
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5574664113441224}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42346949888019064}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18580060422960726}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2802013422818792}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41235416666666663}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30169547872340424}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
|
HF Open LLM v2
|
TIGER-Lab
|
TIGER-Lab/AceCodeRM-7B
|
eb1d6ce5-3b0c-477d-9ca6-2f3ff8bc4e30
|
0.0.1
|
hfopenllm_v2/TIGER-Lab_AceCodeRM-7B/1762652579.9101062
|
1762652579.910107
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
TIGER-Lab/AceCodeRM-7B
|
TIGER-Lab/AceCodeRM-7B
|
TIGER-Lab
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5854931581536988}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4773230085351336}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3466767371601209}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30453020134228187}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41920833333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3361037234042553}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalRM", "params_billions": 7.616}
|
HF Open LLM v2
|
spow12
|
spow12/ChatWaifu_22B_v2.0_preview
|
d0e259de-1261-4d31-a1d4-4689112deca0
|
0.0.1
|
hfopenllm_v2/spow12_ChatWaifu_22B_v2.0_preview/1762652580.534824
|
1762652580.5348248
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
spow12/ChatWaifu_22B_v2.0_preview
|
spow12/ChatWaifu_22B_v2.0_preview
|
spow12
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6744947849483814}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6170153091362338}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18882175226586104}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31543624161073824}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3685416666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39876994680851063}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 22.247}
|
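Each row above stores the per-benchmark scores as a JSON-encoded list in the evaluation_results field and the model metadata (precision, architecture, parameter count) as a small JSON object in additional_details. The sketch below is a minimal Python example of turning one such evaluation_results value into a plain benchmark-to-score mapping; the helper name scores_by_benchmark is hypothetical, and the sample payload is abbreviated to two of the six entries from the invisietch/EtherealRainbow-v0.2-8B row above.

```python
import json

# evaluation_results sample, copied from the invisietch/EtherealRainbow-v0.2-8B
# row above and truncated to two entries for brevity; every row uses this shape.
raw_results = """[
  {"evaluation_name": "IFEval",
   "metric_config": {"evaluation_description": "Accuracy on IFEval",
                     "lower_is_better": false, "score_type": "continuous",
                     "min_score": 0, "max_score": 1},
   "score_details": {"score": 0.39032988027323057}},
  {"evaluation_name": "MMLU-PRO",
   "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO",
                     "lower_is_better": false, "score_type": "continuous",
                     "min_score": 0, "max_score": 1},
   "score_details": {"score": 0.36527593085106386}}
]"""


def scores_by_benchmark(results_json: str) -> dict[str, float]:
    """Map each evaluation_name to the score stored under score_details."""
    return {
        entry["evaluation_name"]: entry["score_details"]["score"]
        for entry in json.loads(results_json)
    }


if __name__ == "__main__":
    for name, score in scores_by_benchmark(raw_results).items():
        # Prints e.g. "IFEval: 0.3903"
        print(f"{name}: {score:.4f}")
```

The same parsing applies to the additional_details field, which is a single JSON object rather than a list, so json.loads on it directly yields the precision, architecture, and params_billions values.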