from dotenv import load_dotenv
import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
import threading
from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    WeightType,
    Precision,
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
from src.evaluators.run_evaluator import evaluator_runner

load_dotenv()
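
# Restart the Space via the Hub API; if the restart fails, fall back to re-downloading the eval datasets.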
def restart_space():
    try:
        print("Restarting space...")
        space_runtime = API.restart_space(repo_id=REPO_ID, token=TOKEN)
        print(f"Space restarted successfully: {space_runtime}")
    except Exception as e:
        print(f"Error restarting space: {str(e)}")
        try:
            print("Attempting to download datasets again...")
            snapshot_download(
                repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN, force_download=True
            )
            snapshot_download(
                repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN, force_download=True
            )
        except Exception as download_error:
            print(f"Error downloading datasets: {str(download_error)}")
def init_leaderboard(dataframe):
    # Also treat an empty DataFrame as an error, matching the message below
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in fields(AutoEvalColumn())],
        select_columns=SelectColumns(
            default_selection=[c.name for c in fields(AutoEvalColumn()) if c.displayed_by_default],
            cant_deselect=[c.name for c in fields(AutoEvalColumn()) if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=[AutoEvalColumn().model.name, AutoEvalColumn().license.name],
        hide_columns=[c.name for c in fields(AutoEvalColumn()) if c.hidden],
        filter_columns=[
            ColumnFilter(AutoEvalColumn().model_type.name, type="checkboxgroup", label="Model types"),
            ColumnFilter(AutoEvalColumn().precision.name, type="checkboxgroup", label="Precision"),
            ColumnFilter(AutoEvalColumn().params.name, type="slider", min=0.01, max=150, label="Select the number of parameters (B)"),
            ColumnFilter(AutoEvalColumn().still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True),
        ],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )
# API.delete_files(repo_id=QUEUE_REPO, token=TOKEN,delete_patterns=["*"],commit_message="Clearing queue",repo_type="dataset")
# API.delete_files(repo_id=RESULTS_REPO, token=TOKEN,delete_patterns=["*"],commit_message="Clearing results",repo_type="dataset")
# sys.exit(0)
### Space initialisation
try:
    print("\n=== Starting space initialization ===")
    print(f"EVAL_REQUESTS_PATH: {EVAL_REQUESTS_PATH}")
    print(f"EVAL_RESULTS_PATH: {EVAL_RESULTS_PATH}")
    print(f"QUEUE_REPO: {QUEUE_REPO}")
    print(f"RESULTS_REPO: {RESULTS_REPO}")
    print(f"TOKEN: {bool(TOKEN)}")

    print("\n=== Downloading request files ===")
    snapshot_download(
        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN, force_download=True
    )
    print("\n=== Downloading results files ===")
    snapshot_download(
        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN, force_download=True
    )

    print("\n=== Loading leaderboard data ===")
    LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
    print(f"Leaderboard DataFrame shape: {LEADERBOARD_DF.shape if LEADERBOARD_DF is not None else 'None'}")

    print("\n=== Loading evaluation queue data ===")
    finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
    print(f"Finished eval queue shape: {finished_eval_queue_df.shape if finished_eval_queue_df is not None else 'None'}")
    print(f"Running eval queue shape: {running_eval_queue_df.shape if running_eval_queue_df is not None else 'None'}")
    print(f"Pending eval queue shape: {pending_eval_queue_df.shape if pending_eval_queue_df is not None else 'None'}")
except Exception as e:
    print("\n=== Error during space initialization ===")
    print(f"Error: {str(e)}")
    restart_space()
    # Retry loading after restart_space has re-downloaded the datasets
    LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
    finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
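
# Build the Gradio UI: leaderboard tab, about tab, submission tab, and the citation block.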
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            leaderboard = init_leaderboard(LEADERBOARD_DF)

        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(INTRODUCTION_TEXT)
            gr.Markdown(LLM_BENCHMARKS_TEXT)
            gr.Markdown(EVALUATION_QUEUE_TEXT)
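
        # Submission tab: evaluation queue status plus the model submission form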
with gr.TabItem("πŸš€ Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
with gr.Column():
with gr.Row():
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
with gr.Column():
with gr.Accordion(
f"βœ… Finished Evaluations ({len(finished_eval_queue_df)})",
open=False,
):
with gr.Row():
finished_eval_table = gr.components.Dataframe(
value=finished_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"πŸ”„ Running Evaluation Queue ({len(running_eval_queue_df)})",
open=False,
):
with gr.Row():
print(running_eval_queue_df)
running_eval_table = gr.components.Dataframe(
value=running_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
open=False,
):
with gr.Row():
pending_eval_table = gr.components.Dataframe(
value=pending_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
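
            # Model submission form, wired to add_new_eval below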
            with gr.Row():
                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

            with gr.Row():
                with gr.Column():
                    model_name_textbox = gr.Textbox(label="Model name")
                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
                    model_type = gr.Dropdown(
                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
                        label="Model type",
                        multiselect=False,
                        value=None,
                        interactive=True,
                    )

                with gr.Column():
                    precision = gr.Dropdown(
                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
                        label="Precision",
                        multiselect=False,
                        value="float16",
                        interactive=True,
                    )
                    weight_type = gr.Dropdown(
                        choices=[i.value.name for i in WeightType],
                        label="Weights type",
                        multiselect=False,
                        value="Original",
                        interactive=True,
                    )
                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

            submit_button = gr.Button("Submit Eval")
            submission_result = gr.Markdown()
            submit_button.click(
                add_new_eval,
                [
                    model_name_textbox,
                    base_model_name_textbox,
                    revision_name_textbox,
                    precision,
                    weight_type,
                    model_type,
                ],
                submission_result,
            )
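
    # Citation reference shown below the tabs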
    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )
# scheduler = BackgroundScheduler()
# scheduler.add_job(restart_space, "interval", seconds=120)
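# Run the evaluator loop in a background thread; the APScheduler-based Space restart above is left disabled.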
thread = threading.Thread(target=evaluator_runner)
# scheduler.start()
thread.start()
demo.queue(default_concurrency_limit=40).launch()