Commit · 6eae533
Parent(s): 4e3a907
add meta data

Files changed:
- app.py +1 -1
- uploads.py +7 -0
app.py
CHANGED
@@ -433,7 +433,7 @@ with demo:
 ## Quick Links
 
 - [**Website**](https://exploration-lab.github.io/IL-TUR): The landing page for IL-TUR
-- [**arXiv Paper**](https://arxiv.org/
+- [**arXiv Paper**](https://arxiv.org/html/2407.05399v1): Detailed information about the IL-TUR dataset and its significance in unlearning tasks.
 - [**GitHub Repository**](https://github.com/exploration-lab/IL-TUR): Access the source code, fine-tuning scripts, and additional resources for the IL-TUR dataset.
 - [**Dataset on Hugging Face**](https://huggingface.co/datasets/Exploration-Lab/IL-TUR): Direct link to download the IL-TUR dataset.
 - [**Leaderboard on Hugging Face Spaces**](https://huggingface.co/spaces/Exploration-Lab/IL-TUR_leaderboard): Current rankings and submissions for the IL-TUR dataset challenges.
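The hunk header shows this markdown lives inside a `with demo:` block, i.e. the Quick Links section is rendered by the Space's UI code. For orientation, a minimal sketch of how such a section is typically wired up; the `gr.Blocks`/`gr.Markdown` scaffolding here is an assumption, not the actual app.py.

```python
# Sketch only: assumes the Space is a Gradio app where `demo` is a gr.Blocks
# instance, as suggested by the `with demo:` hunk header.
import gradio as gr

demo = gr.Blocks()

with demo:
    # Static markdown section corresponding to the hunk edited in this commit.
    gr.Markdown(
        """
## Quick Links

- [**Website**](https://exploration-lab.github.io/IL-TUR): The landing page for IL-TUR
- [**arXiv Paper**](https://arxiv.org/html/2407.05399v1): Detailed information about the IL-TUR dataset.
"""
    )

if __name__ == "__main__":
    demo.launch()
```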
uploads.py
CHANGED
@@ -91,6 +91,13 @@ def add_new_eval(
 
         # Get evaluation scores
         submission = get_evaluation_scores(gold_data, submission_data)
+
+        # Add metadata
+        submission["Method"] = method_name
+        submission["Submitted By"] = submitted_by
+        # submission["Organisation"] = organisation
+        # submission["Email"] = mail
+        submission["Github Link"] = url
     else:
         # Read submission directly if it's not in predictions format
         with open(path_to_file, "r") as f:
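For context, a minimal sketch of how the changed branch of `add_new_eval` reads after this commit. Only the seven `+` lines come from the diff; the function signature, the `is_predictions_format` helper, and the `get_evaluation_scores` stub are hypothetical scaffolding inferred from names visible in the hunk.

```python
import json


def get_evaluation_scores(gold_data, submission_data):
    """Hypothetical stand-in for the real scorer referenced in the diff."""
    return {"Score": 0.0}


def is_predictions_format(path_to_file):
    """Hypothetical helper: decide whether the file is in predictions format."""
    return path_to_file.endswith(".json")


def add_new_eval(path_to_file, method_name, submitted_by, url, gold_data):
    """Sketch of the branch shown in the diff; the signature is assumed."""
    if is_predictions_format(path_to_file):
        with open(path_to_file, "r") as f:
            submission_data = json.load(f)

        # Get evaluation scores
        submission = get_evaluation_scores(gold_data, submission_data)

        # Add metadata -- the seven lines this commit introduces
        submission["Method"] = method_name
        submission["Submitted By"] = submitted_by
        # submission["Organisation"] = organisation
        # submission["Email"] = mail
        submission["Github Link"] = url
    else:
        # Read submission directly if it's not in predictions format
        with open(path_to_file, "r") as f:
            submission = json.load(f)
    return submission
```

The effect of the commit is simply to attach submitter metadata (`Method`, `Submitted By`, `Github Link`) to the score row, presumably so the leaderboard table can display it alongside the evaluation results.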