ycy committed
Commit 040a4a4 · 1 Parent(s): f68345e
app.py CHANGED
@@ -107,84 +107,53 @@ with demo:
     with gr.Row():
         gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
 
-    with gr.Column():
-        with gr.Accordion(
-            f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-            open=False,
-        ):
-            with gr.Row():
-                finished_eval_table = gr.components.Dataframe(
-                    value=finished_eval_queue_df,
-                    headers=EVAL_COLS,
-                    datatype=EVAL_TYPES,
-                    row_count=5,
-                )
-        with gr.Accordion(
-            f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-            open=False,
-        ):
-            with gr.Row():
-                running_eval_table = gr.components.Dataframe(
-                    value=running_eval_queue_df,
-                    headers=EVAL_COLS,
-                    datatype=EVAL_TYPES,
-                    row_count=5,
-                )
-
-        with gr.Accordion(
-            f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-            open=False,
-        ):
-            with gr.Row():
-                pending_eval_table = gr.components.Dataframe(
-                    value=pending_eval_queue_df,
-                    headers=EVAL_COLS,
-                    datatype=EVAL_TYPES,
-                    row_count=5,
-                )
+    # with gr.Column():
+    #     with gr.Accordion(
+    #         f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+    #         open=False,
+    #     ):
+    #         with gr.Row():
+    #             finished_eval_table = gr.components.Dataframe(
+    #                 value=finished_eval_queue_df,
+    #                 headers=EVAL_COLS,
+    #                 datatype=EVAL_TYPES,
+    #                 row_count=5,
+    #             )
+    #     with gr.Accordion(
+    #         f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+    #         open=False,
+    #     ):
+    #         with gr.Row():
+    #             running_eval_table = gr.components.Dataframe(
+    #                 value=running_eval_queue_df,
+    #                 headers=EVAL_COLS,
+    #                 datatype=EVAL_TYPES,
+    #                 row_count=5,
+    #             )
+
+    #     with gr.Accordion(
+    #         f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+    #         open=False,
+    #     ):
+    #         with gr.Row():
+    #             pending_eval_table = gr.components.Dataframe(
+    #                 value=pending_eval_queue_df,
+    #                 headers=EVAL_COLS,
+    #                 datatype=EVAL_TYPES,
+    #                 row_count=5,
+    #             )
     with gr.Row():
-        gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+        gr.Markdown("# ✉️✨ Submit your open model here!", elem_classes="markdown-text")
 
     with gr.Row():
         with gr.Column():
-            model_name_textbox = gr.Textbox(label="Model name")
-            revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-            model_type = gr.Dropdown(
-                choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                label="Model type",
-                multiselect=False,
-                value=None,
-                interactive=True,
-            )
-
-        with gr.Column():
-            precision = gr.Dropdown(
-                choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                label="Precision",
-                multiselect=False,
-                value="float16",
-                interactive=True,
-            )
-            weight_type = gr.Dropdown(
-                choices=[i.value.name for i in WeightType],
-                label="Weights type",
-                multiselect=False,
-                value="Original",
-                interactive=True,
-            )
-            base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
+            model_name = gr.Textbox(label="Model name")
+            # display name and open-source flag consumed by the new add_new_eval signature
+            model_show = gr.Textbox(label="Model display name")
+            open_source = gr.Checkbox(label="Open-source model", value=True)
 
         submit_button = gr.Button("Submit Eval")
         submission_result = gr.Markdown()
         submit_button.click(
             add_new_eval,
             [
-                model_name_textbox,
-                base_model_name_textbox,
-                revision_name_textbox,
-                precision,
-                weight_type,
-                model_type,
+                model_name,
+                model_show,
+                open_source,
             ],
             submission_result,
         )
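For reference, the simplified form wires three Gradio inputs straight into add_new_eval. Below is a minimal, self-contained sketch of that flow; the stubbed add_new_eval and the field labels are assumptions for illustration, not the Space's actual code:

import gradio as gr

# Hypothetical stub standing in for src.submission.submit.add_new_eval so the
# sketch runs on its own; the real function validates the model and writes a
# request file.
def add_new_eval(model: str, model_show: str, open_source: bool) -> str:
    return f"Queued {model!r} (shown as {model_show!r}, open_source={open_source})"

with gr.Blocks() as demo:
    gr.Markdown("# ✉️✨ Submit your open model here!")
    with gr.Column():
        model_name = gr.Textbox(label="Model name")  # e.g. "org/modelname"
        model_show = gr.Textbox(label="Model display name")
        open_source = gr.Checkbox(label="Open-source model", value=True)
    submit_button = gr.Button("Submit Eval")
    submission_result = gr.Markdown()
    # The inputs list must match add_new_eval's parameter order:
    # model, model_show, open_source.
    submit_button.click(add_new_eval, [model_name, model_show, open_source], submission_result)

if __name__ == "__main__":
    demo.launch()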
src/about.py CHANGED
@@ -85,57 +85,7 @@ To reproduce our results, here are the commands you can run:
 """
 
 EVALUATION_QUEUE_TEXT = """
-<div style="font-family: 'Arial', sans-serif; padding: 20px;">
 
-  <h2 style="font-size: 28px; font-weight: bold; text-align: center; color: #4CAF50;">Model Submission</h2>
-
-  <p style="font-size: 18px; color: #555;">
-    We are excited to invite you to submit your Vision-Language Model for evaluation. Please choose one of the following submission methods:
-  </p>
-
-  <!-- Open-source model submission -->
-  <div style="margin-top: 20px; padding: 10px; background-color: #f1f1f1; border-radius: 8px;">
-    <h3 style="font-size: 22px; color: #6A5ACD;">1. Open-Source Model Submission</h3>
-    <p style="font-size: 16px; color: #444;">
-      If your model is open-source, simply provide the model name in the following format: <b>org/modelname</b>.
-    </p>
-    <label for="opensource-model" style="font-size: 16px; color: #444;">Model Name (org/modelname):</label>
-    <input type="text" id="opensource-model" name="opensource-model" placeholder="e.g., org/modelname" style="width: 100%; padding: 8px; margin-top: 10px; border-radius: 4px;">
-  </div>
-
-  <!-- Closed-source model submission -->
-  <div style="margin-top: 20px; padding: 10px; background-color: #f1f1f1; border-radius: 8px;">
-    <h3 style="font-size: 22px; color: #6A5ACD;">2. Closed-Source Model Submission</h3>
-    <p style="font-size: 16px; color: #444;">
-      If your model is closed-source, please select one of the following methods to submit your model:
-    </p>
-
-    <!-- Option 1: User performs evaluation -->
-    <div style="margin-top: 10px;">
-      <input type="radio" id="user-evaluates" name="evaluation-method" value="user-evaluates" style="margin-right: 8px;">
-      <label for="user-evaluates" style="font-size: 16px; color: #444;">I will evaluate my model using your provided evaluation methods and submit the model name and results.</label>
-    </div>
-
-    <!-- Option 2: We evaluate the model -->
-    <div style="margin-top: 10px;">
-      <input type="radio" id="we-evaluate" name="evaluation-method" value="we-evaluate" style="margin-right: 8px;">
-      <label for="we-evaluate" style="font-size: 16px; color: #444;">I would like you to evaluate my model. Please provide the model's URL and API key.</label>
-    </div>
-
-    <!-- User Input for Closed-Source Model -->
-    <div id="closed-source-details" style="margin-top: 20px; display: none;">
-      <p style="font-size: 16px; color: #444;">
-        For option 2, please provide the following information:
-      </p>
-      <label for="model-url" style="font-size: 16px; color: #444;">Model URL:</label>
-      <input type="url" id="model-url" name="model-url" placeholder="https://example.com" style="width: 100%; padding: 8px; margin-top: 10px; border-radius: 4px;">
-
-      <label for="api-key" style="font-size: 16px; color: #444;">API Key:</label>
-      <input type="text" id="api-key" name="api-key" placeholder="Your API key" style="width: 100%; padding: 8px; margin-top: 10px; border-radius: 4px;">
-    </div>
-  </div>
-
-</div>
 
 """
src/submission/check_validity.py CHANGED
@@ -88,7 +88,7 @@ def already_submitted_models(requested_models_dir: str) -> set[str]:
                     continue
                 with open(os.path.join(root, file), "r") as f:
                     info = json.load(f)
-                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
+                    file_names.append(f"{info['model']}")
 
                     # Select organisation
                     if info["model"].count("/") == 0 or "submitted_time" not in info:
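The practical effect of this change is that the duplicate check now keys on the bare model name rather than on model + revision + precision. A small sketch of the consequence, using hypothetical request dicts:

def old_key(info: dict) -> str:
    # pre-commit key: model name, revision, and precision combined
    return f"{info['model']}_{info['revision']}_{info['precision']}"

def new_key(info: dict) -> str:
    # post-commit key: model name only
    return info["model"]

a = {"model": "org/model", "revision": "main", "precision": "float16"}
b = {"model": "org/model", "revision": "v2.0", "precision": "bfloat16"}

print(old_key(a) == old_key(b))  # False: each revision/precision was queued separately
print(new_key(a) == new_key(b))  # True: at most one queue entry per model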
src/submission/submit.py CHANGED
@@ -18,11 +18,8 @@ USERS_TO_SUBMISSION_DATES = None
 
 def add_new_eval(
     model: str,
-    base_model: str,
-    revision: str,
-    precision: str,
-    weight_type: str,
-    model_type: str,
+    model_show: str,
+    open_source: bool,
 ):
     """Submit a model to the evaluation queue; the request info is saved automatically to the requests dataset."""
     global REQUESTED_MODELS
@@ -30,46 +27,14 @@ def add_new_eval(
     if not REQUESTED_MODELS:
         REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
 
-    user_name = ""
-    model_path = model
-    if "/" in model:
-        user_name = model.split("/")[0]
-        model_path = model.split("/")[1]
-
-    precision = precision.split(" ")[0]
     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
 
-    if model_type is None or model_type == "":
-        return styled_error("Please select a model type.")
-
-    # Does the model actually exist?
-    if revision == "":
-        revision = "main"
-
-    # Is the model on the hub?
-    if weight_type in ["Delta", "Adapter"]:
-        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not base_model_on_hub:
-            return styled_error(f'Base model "{base_model}" {error}')
-
-    if not weight_type == "Adapter":
-        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not model_on_hub:
-            return styled_error(f'Model "{model}" {error}')
-
     # Is the model info correctly filled?
-    try:
-        model_info = API.model_info(repo_id=model, revision=revision)
-    except Exception:
-        return styled_error("Could not get your model information. Please fill it up properly.")
-
-    model_size = get_model_size(model_info=model_info, precision=precision)
-
-    # Were the model card and license filled?
-    try:
-        license = model_info.cardData["license"]
-    except Exception:
-        return styled_error("Please select a license for your model")
+    if open_source:
+        try:
+            model_info = API.model_info(repo_id=model)
+        except Exception:
+            return styled_error("Could not get your model information. Please fill it up properly.")
 
     modelcard_OK, error_msg = check_model_card(model)
     if not modelcard_OK:
@@ -79,29 +44,26 @@
     print("Adding new eval")
 
     eval_entry = {
-        "model": model,
-        "base_model": base_model,
-        "revision": revision,
-        "precision": precision,
-        "weight_type": weight_type,
+        "model_name": model,
+        "model_show": model_show,
+        "open_source": open_source,
         "status": "PENDING",
-        "submitted_time": current_time,
-        "model_type": model_type,
-        "likes": model_info.likes,
-        "params": model_size,
-        "license": license,
-        "private": False,
+        "submitted_time": current_time,
     }
 
     # Check for duplicate submission
-    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
+    if model in REQUESTED_MODELS:
         return styled_warning("This model has been already submitted.")
 
     print("Creating eval file")
-    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
-    os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
-
+    if open_source:
+        # write the request into the per-organisation requests directory
+        user_name = model.split("/")[0]
+        model_path = model.split("/")[1]
+        OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+        os.makedirs(OUT_DIR, exist_ok=True)
+        out_path = f"{OUT_DIR}/{model_path}_eval_request_False.json"
+    else:
+        out_path = f"{EVAL_REQUESTS_PATH}/{model_show}_eval_request_True.json"
+
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
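Under the trimmed schema, a request file now carries only five fields. A sketch of the payload for a hypothetical open-source submission (the model names are placeholders, and the JSON is printed rather than written to the requests directory):

import json
from datetime import datetime, timezone

# Hypothetical submission, mirroring the fields the new add_new_eval records.
eval_entry = {
    "model_name": "org/modelname",
    "model_show": "My Model",
    "open_source": True,
    "status": "PENDING",
    "submitted_time": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
}

# For an open-source model the file lands under EVAL_REQUESTS_PATH/<org>/;
# here we just inspect the payload.
print(json.dumps(eval_entry, indent=2))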