Commit 4975a65
Parent(s): 805de2a

updated scores

Files changed:
- app.py (+4, -4)
- src/utils.py (+0, -1)
app.py
CHANGED

@@ -33,7 +33,7 @@ COLUMNS_MAPPING = {
     #
     "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
     "forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
-    "…
+    "score": "Average Open LLM Score ⬆️",
     #
     "num_parameters": "#️⃣ Parameters 📏",
 }
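This hunk changes the display name of the score column in COLUMNS_MAPPING. The code that consumes the dict is not part of this diff, but a mapping like this is typically applied with pandas' DataFrame.rename; a minimal sketch with made-up values:

import pandas as pd

# Hypothetical one-row frame with the raw benchmark columns referenced above.
bench_df = pd.DataFrame(
    {
        "generate.throughput(tokens/s)": [42.0],
        "forward.peak_memory(MB)": [16384.0],
        "score": [55.3],  # column added by this commit's merge
        "num_parameters": [7_000_000_000],
    }
)

COLUMNS_MAPPING = {
    "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
    "forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
    "score": "Average Open LLM Score ⬆️",
    "num_parameters": "#️⃣ Parameters 📏",
}

# Rename raw metric columns to their human-readable display names.
display_df = bench_df.rename(columns=COLUMNS_MAPPING)
print(display_df.columns.tolist())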
@@ -61,7 +61,7 @@ def get_benchmark_df(benchmark="1xA100-80GB"):
 
     # load
     bench_df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}.csv")
-    scores_df = pd.read_csv(f"./llm-perf-dataset/reports/…
+    scores_df = pd.read_csv(f"./llm-perf-dataset/reports/open-llm-leaderboard.csv")
     bench_df = bench_df.merge(scores_df, on="model", how="left")
 
     bench_df["optimizations"] = bench_df[
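The new read targets open-llm-leaderboard.csv and left-merges it into the benchmark frame on the model column. A minimal sketch of what that merge does, using hypothetical stand-in frames:

import pandas as pd

# Hypothetical stand-ins for the benchmark CSV and the leaderboard scores CSV.
bench_df = pd.DataFrame(
    {"model": ["llama-7b", "falcon-7b"], "generate.latency(s)": [1.2, 1.5]}
)
scores_df = pd.DataFrame(
    {"model": ["llama-7b", "mpt-7b"], "score": [55.3, 47.1]}
)

# Left merge keeps every benchmarked model; models absent from the
# leaderboard CSV simply get NaN in the new "score" column.
bench_df = bench_df.merge(scores_df, on="model", how="left")
print(bench_df)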
@@ -99,7 +99,7 @@ def get_benchmark_plot(bench_df):
     fig = px.scatter(
         bench_df,
         x="generate.latency(s)",
-        y="…
+        y="score",
         color="model_type",
         symbol="backend.name",
         size="forward.peak_memory(MB)",
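With the scores merged in, the plot's y-axis switches to the "score" column. A self-contained sketch of an equivalent px.scatter call on toy data:

import pandas as pd
import plotly.express as px

# Toy frame with the columns the plot uses; all values are made up.
bench_df = pd.DataFrame(
    {
        "generate.latency(s)": [1.2, 0.8, 2.1],
        "score": [55.3, 47.1, 60.2],
        "model_type": ["llama", "falcon", "llama"],
        "backend.name": ["pytorch", "pytorch", "onnxruntime"],
        "forward.peak_memory(MB)": [14000.0, 16000.0, 15000.0],
    }
)

# Latency vs. average Open LLM score, colored by model type, marker shape by
# backend, marker size proportional to peak memory.
fig = px.scatter(
    bench_df,
    x="generate.latency(s)",
    y="score",
    color="model_type",
    symbol="backend.name",
    size="forward.peak_memory(MB)",
)
fig.show()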
@@ -175,7 +175,7 @@ def filter_query(
         if len(optimizations) > 0
         else True
     )
-    & (raw_df["…
+    & (raw_df["score"] >= score)
     & (raw_df["forward.peak_memory(MB)"] <= memory)
 ]
 
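The filter gains a score threshold alongside the existing memory cap. A sketch of the same chained boolean-mask filtering on a toy frame, with hypothetical threshold values standing in for whatever the app passes to filter_query:

import pandas as pd

# Toy frame standing in for raw_df; "score" is the column this commit merges in.
raw_df = pd.DataFrame(
    {
        "model": ["a", "b", "c"],
        "score": [55.3, 47.1, 60.2],
        "forward.peak_memory(MB)": [14000.0, 90000.0, 15000.0],
    }
)

# Hypothetical threshold values; in the app they presumably come from UI controls.
score = 50.0
memory = 80000.0

# Chained boolean masks, mirroring the filter in the hunk above: keep rows whose
# score meets the threshold and whose peak memory fits the budget.
filtered_df = raw_df[
    (raw_df["score"] >= score) & (raw_df["forward.peak_memory(MB)"] <= memory)
]
print(filtered_df)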
src/utils.py
CHANGED

@@ -1,4 +1,3 @@
-import re
 from huggingface_hub import HfApi, Repository
 
 