Spaces:
Sleeping
Sleeping
Category of benchmark
Browse files
app.py
CHANGED
|
@@ -206,15 +206,20 @@ with gr.Blocks(theme=gr.themes.Soft(text_size=text_md), css=custom_css) as main:
|
|
| 206 |
gr.Markdown(LEADERBOARD_TAB_TITLE_MARKDOWN)
|
| 207 |
|
| 208 |
with gr.Row():
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 218 |
|
| 219 |
with gr.Tab('Model details'):
|
| 220 |
gr.Markdown(MORE_DETAILS_MARKDOWN)
|
|
|
|
| 206 |
gr.Markdown(LEADERBOARD_TAB_TITLE_MARKDOWN)
|
| 207 |
|
| 208 |
with gr.Row():
|
| 209 |
+
category_of_tasks = gr.Dropdown(
|
| 210 |
+
choices=[leaderboard_server.tasks_category_overall] + list(leaderboard_server.tasks_categories),
|
| 211 |
+
label="Category of benchmark",
|
| 212 |
+
interactive=True,
|
| 213 |
+
)
|
| 214 |
+
|
| 215 |
+
results_table = gr.DataFrame(
|
| 216 |
+
leaderboard_server.get_leaderboard(),
|
| 217 |
+
interactive=False,
|
| 218 |
+
label=None,
|
| 219 |
+
visible=True,
|
| 220 |
+
datatype="markdown",
|
| 221 |
+
elem_classes="leaderboard-table",
|
| 222 |
+
)
|
| 223 |
|
| 224 |
with gr.Tab('Model details'):
|
| 225 |
gr.Markdown(MORE_DETAILS_MARKDOWN)
|
server.py
CHANGED
|
@@ -55,6 +55,7 @@ class LeaderboardServer:
|
|
| 55 |
self.submission_id_to_file = {} # Map submission ids to file paths
|
| 56 |
self.tasks_metadata = json.load(open(TASKS_METADATA_PATH))
|
| 57 |
self.tasks_categories = {self.tasks_metadata[task]["category"] for task in self.tasks_metadata}
|
|
|
|
| 58 |
self.submission_ids = set()
|
| 59 |
self.fetch_existing_models()
|
| 60 |
self.tournament_results = self.load_tournament_results()
|
|
@@ -90,8 +91,9 @@ class LeaderboardServer:
|
|
| 90 |
|
| 91 |
self.submission_id_to_file[submission_id] = submission_file
|
| 92 |
|
| 93 |
-
def get_leaderboard(self, tournament_results=None, category='overall'):
|
| 94 |
tournament_results = tournament_results if tournament_results else self.tournament_results
|
|
|
|
| 95 |
|
| 96 |
if len(tournament_results) == 0:
|
| 97 |
return pd.DataFrame(columns=['No submissions yet'])
|
|
@@ -118,7 +120,7 @@ class LeaderboardServer:
|
|
| 118 |
for task in self.tasks_metadata.keys():
|
| 119 |
|
| 120 |
task_category = self.tasks_metadata[task]["category"]
|
| 121 |
-
if category not in ('overall', task_category):
|
| 122 |
continue
|
| 123 |
else:
|
| 124 |
# tournament_results
|
|
@@ -144,7 +146,7 @@ class LeaderboardServer:
|
|
| 144 |
for c in win_score:
|
| 145 |
win_score[c] = sum(win_score[c]) / len(win_score[c])
|
| 146 |
|
| 147 |
-
if category == 'overall':
|
| 148 |
for c in win_score:
|
| 149 |
local_results[c] = win_score[c]
|
| 150 |
local_results["average_score"] = sum(win_score.values()) / len(win_score)
|
|
|
|
| 55 |
self.submission_id_to_file = {} # Map submission ids to file paths
|
| 56 |
self.tasks_metadata = json.load(open(TASKS_METADATA_PATH))
|
| 57 |
self.tasks_categories = {self.tasks_metadata[task]["category"] for task in self.tasks_metadata}
|
| 58 |
+
self.tasks_category_overall = "overall"
|
| 59 |
self.submission_ids = set()
|
| 60 |
self.fetch_existing_models()
|
| 61 |
self.tournament_results = self.load_tournament_results()
|
|
|
|
| 91 |
|
| 92 |
self.submission_id_to_file[submission_id] = submission_file
|
| 93 |
|
| 94 |
+
def get_leaderboard(self, tournament_results=None, category=None):
|
| 95 |
tournament_results = tournament_results if tournament_results else self.tournament_results
|
| 96 |
+
category = category if category else self.tasks_category_overall
|
| 97 |
|
| 98 |
if len(tournament_results) == 0:
|
| 99 |
return pd.DataFrame(columns=['No submissions yet'])
|
|
|
|
| 120 |
for task in self.tasks_metadata.keys():
|
| 121 |
|
| 122 |
task_category = self.tasks_metadata[task]["category"]
|
| 123 |
+
if category not in (self.tasks_category_overall, task_category):
|
| 124 |
continue
|
| 125 |
else:
|
| 126 |
# tournament_results
|
|
|
|
| 146 |
for c in win_score:
|
| 147 |
win_score[c] = sum(win_score[c]) / len(win_score[c])
|
| 148 |
|
| 149 |
+
if category == self.tasks_category_overall:
|
| 150 |
for c in win_score:
|
| 151 |
local_results[c] = win_score[c]
|
| 152 |
local_results["average_score"] = sum(win_score.values()) / len(win_score)
|