nazdridoy committed on
Commit
8c7976b
·
verified ·
1 Parent(s): a9e86b4

feat(core): Add client_name to token status reporting

Browse files

- [feat] Add `client_name` parameter to `report_token_status()` and its payload (hf_token_utils.py:102,142-143)
- [update] Add `client_name` parameter to various handler functions (chat_handler.py:chat_respond(), image_handler.py:generate_image(), generate_image_to_image(), tts_handler.py:generate_text_to_speech(), video_handler.py:generate_video())
- [update] Pass `client_name` to `report_token_status()` calls in handler functions (chat_handler.py:126,130,134,148, image_handler.py:109,118,126,134,233,242,250,258, tts_handler.py:112,121,129,137, video_handler.py:95,103,111,119)
- [update] Pass `client_name` from `handle_*` functions to their respective generation/response functions (chat_handler.py:200,276, image_handler.py:305,334, tts_handler.py:186, video_handler.py:156)
- [refactor] Rename `_username` to `username` in handler functions (chat_handler.py:180,219, image_handler.py:292,320, tts_handler.py:172, video_handler.py:144)

Files changed (5) hide show
  1. chat_handler.py +11 -8
  2. hf_token_utils.py +5 -1
  3. image_handler.py +16 -12
  4. tts_handler.py +8 -6
  5. video_handler.py +7 -5
chat_handler.py CHANGED
@@ -33,6 +33,7 @@ def chat_respond(
33
  max_tokens,
34
  temperature,
35
  top_p,
 
36
  ):
37
  """
38
  Chat completion function using HF-Inferoxy token management.
@@ -125,14 +126,14 @@ def chat_respond(
125
 
126
  # Report successful token usage
127
  if token_id:
128
- report_token_status(token_id, "success", api_key=proxy_api_key)
129
 
130
  except ConnectionError as e:
131
  # Handle proxy connection errors
132
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
133
  print(f"🔌 Chat connection error: {error_msg}")
134
  if token_id:
135
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
136
  yield format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
137
 
138
  except TimeoutError as e:
@@ -140,7 +141,7 @@ def chat_respond(
140
  error_msg = f"Request timed out: {str(e)}"
141
  print(f"⏰ Chat timeout: {error_msg}")
142
  if token_id:
143
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
144
  yield format_error_message("Timeout Error", "The request took too long. The server may be overloaded. Please try again.")
145
 
146
  except HfHubHTTPError as e:
@@ -148,7 +149,7 @@ def chat_respond(
148
  error_msg = str(e)
149
  print(f"🤗 Chat HF error: {error_msg}")
150
  if token_id:
151
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
152
 
153
  # Provide more user-friendly error messages
154
  if "401" in error_msg:
@@ -179,7 +180,7 @@ def handle_chat_submit(message, history, system_msg, model_name, provider, max_t
179
 
180
  # Enforce org-based access control via HF OAuth token
181
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
182
- is_allowed, access_msg, _username, _matched = check_org_access(access_token)
183
  if not is_allowed:
184
  # Show access denied as assistant message
185
  assistant_response = format_access_denied_message(access_msg)
@@ -199,7 +200,8 @@ def handle_chat_submit(message, history, system_msg, model_name, provider, max_t
199
  provider,
200
  max_tokens,
201
  temperature,
202
- top_p
 
203
  )
204
 
205
  # Stream the assistant response token by token
@@ -218,7 +220,7 @@ def handle_chat_retry(history, system_msg, model_name, provider, max_tokens, tem
218
  """
219
  # Enforce org-based access control via HF OAuth token
220
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
221
- is_allowed, access_msg, _username, _matched = check_org_access(access_token)
222
  if not is_allowed:
223
  # Show access denied as assistant message
224
  assistant_response = format_access_denied_message(access_msg)
@@ -273,7 +275,8 @@ def handle_chat_retry(history, system_msg, model_name, provider, max_tokens, tem
273
  provider,
274
  max_tokens,
275
  temperature,
276
- top_p
 
277
  )
278
 
279
  assistant_response = ""
 
33
  max_tokens,
34
  temperature,
35
  top_p,
36
+ client_name: str | None = None,
37
  ):
38
  """
39
  Chat completion function using HF-Inferoxy token management.
 
126
 
127
  # Report successful token usage
128
  if token_id:
129
+ report_token_status(token_id, "success", api_key=proxy_api_key, client_name=client_name)
130
 
131
  except ConnectionError as e:
132
  # Handle proxy connection errors
133
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
134
  print(f"🔌 Chat connection error: {error_msg}")
135
  if token_id:
136
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
137
  yield format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
138
 
139
  except TimeoutError as e:
 
141
  error_msg = f"Request timed out: {str(e)}"
142
  print(f"⏰ Chat timeout: {error_msg}")
143
  if token_id:
144
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
145
  yield format_error_message("Timeout Error", "The request took too long. The server may be overloaded. Please try again.")
146
 
147
  except HfHubHTTPError as e:
 
149
  error_msg = str(e)
150
  print(f"🤗 Chat HF error: {error_msg}")
151
  if token_id:
152
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
153
 
154
  # Provide more user-friendly error messages
155
  if "401" in error_msg:
 
180
 
181
  # Enforce org-based access control via HF OAuth token
182
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
183
+ is_allowed, access_msg, username, _matched = check_org_access(access_token)
184
  if not is_allowed:
185
  # Show access denied as assistant message
186
  assistant_response = format_access_denied_message(access_msg)
 
200
  provider,
201
  max_tokens,
202
  temperature,
203
+ top_p,
204
+ client_name=username
205
  )
206
 
207
  # Stream the assistant response token by token
 
220
  """
221
  # Enforce org-based access control via HF OAuth token
222
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
223
+ is_allowed, access_msg, username, _matched = check_org_access(access_token)
224
  if not is_allowed:
225
  # Show access denied as assistant message
226
  assistant_response = format_access_denied_message(access_msg)
 
275
  provider,
276
  max_tokens,
277
  temperature,
278
+ top_p,
279
+ client_name=username
280
  )
281
 
282
  assistant_response = ""
hf_token_utils.py CHANGED
@@ -99,7 +99,8 @@ def report_token_status(
99
  status: str = "success",
100
  error: Optional[str] = None,
101
  proxy_url: str = None,
102
- api_key: str = None
 
103
  ) -> bool:
104
  """
105
  Report token usage status back to the proxy server with timeout handling.
@@ -141,6 +142,9 @@ def report_token_status(
141
  if error_type:
142
  payload["error_type"] = error_type
143
 
 
 
 
144
  headers = {"Content-Type": "application/json"}
145
  if api_key:
146
  headers["Authorization"] = f"Bearer {api_key}"
 
99
  status: str = "success",
100
  error: Optional[str] = None,
101
  proxy_url: str = None,
102
+ api_key: str = None,
103
+ client_name: Optional[str] = None
104
  ) -> bool:
105
  """
106
  Report token usage status back to the proxy server with timeout handling.
 
142
  if error_type:
143
  payload["error_type"] = error_type
144
 
145
+ if client_name:
146
+ payload["client_name"] = client_name
147
+
148
  headers = {"Content-Type": "application/json"}
149
  if api_key:
150
  headers["Authorization"] = f"Bearer {api_key}"
image_handler.py CHANGED
@@ -42,6 +42,7 @@ def generate_image(
42
  num_inference_steps: int = IMAGE_CONFIG["num_inference_steps"],
43
  guidance_scale: float = IMAGE_CONFIG["guidance_scale"],
44
  seed: int = IMAGE_CONFIG["seed"],
 
45
  ):
46
  """
47
  Generate an image using the specified model and provider through HF-Inferoxy.
@@ -108,7 +109,7 @@ def generate_image(
108
 
109
  # Report successful token usage
110
  if token_id:
111
- report_token_status(token_id, "success", api_key=proxy_api_key)
112
 
113
  return image, format_success_message("Image generated", f"using {model_name} on {provider}")
114
 
@@ -117,7 +118,7 @@ def generate_image(
117
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
118
  print(f"πŸ”Œ Image connection error: {error_msg}")
119
  if token_id:
120
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
121
  return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
122
 
123
  except TimeoutError as e:
@@ -125,7 +126,7 @@ def generate_image(
125
  error_msg = f"Image generation timed out: {str(e)}"
126
  print(f"⏰ Image timeout: {error_msg}")
127
  if token_id:
128
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
129
  return None, format_error_message("Timeout Error", f"Image generation took too long (>{IMAGE_GENERATION_TIMEOUT//60} minutes). Try reducing image size or steps.")
130
 
131
  except HfHubHTTPError as e:
@@ -133,7 +134,7 @@ def generate_image(
133
  error_msg = str(e)
134
  print(f"🤗 Image HF error: {error_msg}")
135
  if token_id:
136
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
137
 
138
  # Provide more user-friendly error messages
139
  if "401" in error_msg:
@@ -165,6 +166,7 @@ def generate_image_to_image(
165
  num_inference_steps: int = IMAGE_CONFIG["num_inference_steps"],
166
  guidance_scale: float = IMAGE_CONFIG["guidance_scale"],
167
  seed: int = IMAGE_CONFIG["seed"],
 
168
  ):
169
  """
170
  Generate an image using image-to-image generation with the specified model and provider through HF-Inferoxy.
@@ -231,7 +233,7 @@ def generate_image_to_image(
231
 
232
  # Report successful token usage
233
  if token_id:
234
- report_token_status(token_id, "success", api_key=proxy_api_key)
235
 
236
  return image, format_success_message("Image-to-image generated", f"using {model_name} on {provider}")
237
 
@@ -240,7 +242,7 @@ def generate_image_to_image(
240
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
241
  print(f"🔌 Image-to-Image connection error: {error_msg}")
242
  if token_id:
243
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
244
  return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
245
 
246
  except TimeoutError as e:
@@ -248,7 +250,7 @@ def generate_image_to_image(
248
  error_msg = f"Image-to-image generation timed out: {str(e)}"
249
  print(f"⏰ Image-to-Image timeout: {error_msg}")
250
  if token_id:
251
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
252
  return None, format_error_message("Timeout Error", f"Image-to-image generation took too long (>{IMAGE_GENERATION_TIMEOUT//60} minutes). Try reducing steps.")
253
 
254
  except HfHubHTTPError as e:
@@ -256,7 +258,7 @@ def generate_image_to_image(
256
  error_msg = str(e)
257
  print(f"🤗 Image-to-Image HF error: {error_msg}")
258
  if token_id:
259
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
260
 
261
  # Provide more user-friendly error messages
262
  if "401" in error_msg:
@@ -289,7 +291,7 @@ def handle_image_to_image_generation(input_image_val, prompt_val, model_val, pro
289
 
290
  # Enforce org-based access control via HF OAuth token
291
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
292
- is_allowed, access_msg, _username, _matched = check_org_access(access_token)
293
  if not is_allowed:
294
  return None, format_access_denied_message(access_msg)
295
 
@@ -302,7 +304,8 @@ def handle_image_to_image_generation(input_image_val, prompt_val, model_val, pro
302
  negative_prompt=negative_prompt_val,
303
  num_inference_steps=steps_val,
304
  guidance_scale=guidance_val,
305
- seed=seed_val
 
306
  )
307
 
308
 
@@ -317,7 +320,7 @@ def handle_image_generation(prompt_val, model_val, provider_val, negative_prompt
317
 
318
  # Enforce org-based access control via HF OAuth token
319
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
320
- is_allowed, access_msg, _username, _matched = check_org_access(access_token)
321
  if not is_allowed:
322
  return None, format_access_denied_message(access_msg)
323
 
@@ -331,5 +334,6 @@ def handle_image_generation(prompt_val, model_val, provider_val, negative_prompt
331
  height=height_val,
332
  num_inference_steps=steps_val,
333
  guidance_scale=guidance_val,
334
- seed=seed_val
 
335
  )
 
42
  num_inference_steps: int = IMAGE_CONFIG["num_inference_steps"],
43
  guidance_scale: float = IMAGE_CONFIG["guidance_scale"],
44
  seed: int = IMAGE_CONFIG["seed"],
45
+ client_name: str | None = None,
46
  ):
47
  """
48
  Generate an image using the specified model and provider through HF-Inferoxy.
 
109
 
110
  # Report successful token usage
111
  if token_id:
112
+ report_token_status(token_id, "success", api_key=proxy_api_key, client_name=client_name)
113
 
114
  return image, format_success_message("Image generated", f"using {model_name} on {provider}")
115
 
 
118
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
119
  print(f"🔌 Image connection error: {error_msg}")
120
  if token_id:
121
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
122
  return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
123
 
124
  except TimeoutError as e:
 
126
  error_msg = f"Image generation timed out: {str(e)}"
127
  print(f"⏰ Image timeout: {error_msg}")
128
  if token_id:
129
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
130
  return None, format_error_message("Timeout Error", f"Image generation took too long (>{IMAGE_GENERATION_TIMEOUT//60} minutes). Try reducing image size or steps.")
131
 
132
  except HfHubHTTPError as e:
 
134
  error_msg = str(e)
135
  print(f"πŸ€— Image HF error: {error_msg}")
136
  if token_id:
137
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
138
 
139
  # Provide more user-friendly error messages
140
  if "401" in error_msg:
 
166
  num_inference_steps: int = IMAGE_CONFIG["num_inference_steps"],
167
  guidance_scale: float = IMAGE_CONFIG["guidance_scale"],
168
  seed: int = IMAGE_CONFIG["seed"],
169
+ client_name: str | None = None,
170
  ):
171
  """
172
  Generate an image using image-to-image generation with the specified model and provider through HF-Inferoxy.
 
233
 
234
  # Report successful token usage
235
  if token_id:
236
+ report_token_status(token_id, "success", api_key=proxy_api_key, client_name=client_name)
237
 
238
  return image, format_success_message("Image-to-image generated", f"using {model_name} on {provider}")
239
 
 
242
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
243
  print(f"🔌 Image-to-Image connection error: {error_msg}")
244
  if token_id:
245
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
246
  return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
247
 
248
  except TimeoutError as e:
 
250
  error_msg = f"Image-to-image generation timed out: {str(e)}"
251
  print(f"⏰ Image-to-Image timeout: {error_msg}")
252
  if token_id:
253
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
254
  return None, format_error_message("Timeout Error", f"Image-to-image generation took too long (>{IMAGE_GENERATION_TIMEOUT//60} minutes). Try reducing steps.")
255
 
256
  except HfHubHTTPError as e:
 
258
  error_msg = str(e)
259
  print(f"🤗 Image-to-Image HF error: {error_msg}")
260
  if token_id:
261
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
262
 
263
  # Provide more user-friendly error messages
264
  if "401" in error_msg:
 
291
 
292
  # Enforce org-based access control via HF OAuth token
293
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
294
+ is_allowed, access_msg, username, _matched = check_org_access(access_token)
295
  if not is_allowed:
296
  return None, format_access_denied_message(access_msg)
297
 
 
304
  negative_prompt=negative_prompt_val,
305
  num_inference_steps=steps_val,
306
  guidance_scale=guidance_val,
307
+ seed=seed_val,
308
+ client_name=username
309
  )
310
 
311
 
 
320
 
321
  # Enforce org-based access control via HF OAuth token
322
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
323
+ is_allowed, access_msg, username, _matched = check_org_access(access_token)
324
  if not is_allowed:
325
  return None, format_access_denied_message(access_msg)
326
 
 
334
  height=height_val,
335
  num_inference_steps=steps_val,
336
  guidance_scale=guidance_val,
337
+ seed=seed_val,
338
+ client_name=username
339
  )
tts_handler.py CHANGED
@@ -36,6 +36,7 @@ def generate_text_to_speech(
36
  exaggeration: float = 0.25,
37
  temperature: float = 0.7,
38
  cfg: float = 0.5,
 
39
  ):
40
  """
41
  Generate speech from text using the specified model and provider through HF-Inferoxy.
@@ -110,7 +111,7 @@ def generate_text_to_speech(
110
 
111
  # Report successful token usage
112
  if token_id:
113
- report_token_status(token_id, "success", api_key=proxy_api_key)
114
 
115
  return audio, format_success_message("Speech generated", f"using {model_name} on {provider} with voice {voice}")
116
 
@@ -119,7 +120,7 @@ def generate_text_to_speech(
119
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
120
  print(f"🔌 TTS connection error: {error_msg}")
121
  if token_id:
122
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
123
  return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
124
 
125
  except TimeoutError as e:
@@ -127,7 +128,7 @@ def generate_text_to_speech(
127
  error_msg = f"TTS generation timed out: {str(e)}"
128
  print(f"⏰ TTS timeout: {error_msg}")
129
  if token_id:
130
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
131
  return None, format_error_message("Timeout Error", f"TTS generation took too long (>{TTS_GENERATION_TIMEOUT//60} minutes). Try shorter text.")
132
 
133
  except HfHubHTTPError as e:
@@ -135,7 +136,7 @@ def generate_text_to_speech(
135
  error_msg = str(e)
136
  print(f"πŸ€— TTS HF error: {error_msg}")
137
  if token_id:
138
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
139
 
140
  # Provide more user-friendly error messages
141
  if "401" in error_msg:
@@ -170,7 +171,7 @@ def handle_text_to_speech_generation(text_val, model_val, provider_val, voice_va
170
 
171
  # Enforce org-based access control via HF OAuth token
172
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
173
- is_allowed, access_msg, _username, _matched = check_org_access(access_token)
174
  if not is_allowed:
175
  return None, format_access_denied_message(access_msg)
176
 
@@ -184,5 +185,6 @@ def handle_text_to_speech_generation(text_val, model_val, provider_val, voice_va
184
  audio_url=audio_url_val,
185
  exaggeration=exaggeration_val,
186
  temperature=temperature_val,
187
- cfg=cfg_val
 
188
  )
 
36
  exaggeration: float = 0.25,
37
  temperature: float = 0.7,
38
  cfg: float = 0.5,
39
+ client_name: str | None = None,
40
  ):
41
  """
42
  Generate speech from text using the specified model and provider through HF-Inferoxy.
 
111
 
112
  # Report successful token usage
113
  if token_id:
114
+ report_token_status(token_id, "success", api_key=proxy_api_key, client_name=client_name)
115
 
116
  return audio, format_success_message("Speech generated", f"using {model_name} on {provider} with voice {voice}")
117
 
 
120
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
121
  print(f"🔌 TTS connection error: {error_msg}")
122
  if token_id:
123
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
124
  return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
125
 
126
  except TimeoutError as e:
 
128
  error_msg = f"TTS generation timed out: {str(e)}"
129
  print(f"⏰ TTS timeout: {error_msg}")
130
  if token_id:
131
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
132
  return None, format_error_message("Timeout Error", f"TTS generation took too long (>{TTS_GENERATION_TIMEOUT//60} minutes). Try shorter text.")
133
 
134
  except HfHubHTTPError as e:
 
136
  error_msg = str(e)
137
  print(f"πŸ€— TTS HF error: {error_msg}")
138
  if token_id:
139
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
140
 
141
  # Provide more user-friendly error messages
142
  if "401" in error_msg:
 
171
 
172
  # Enforce org-based access control via HF OAuth token
173
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
174
+ is_allowed, access_msg, username, _matched = check_org_access(access_token)
175
  if not is_allowed:
176
  return None, format_access_denied_message(access_msg)
177
 
 
185
  audio_url=audio_url_val,
186
  exaggeration=exaggeration_val,
187
  temperature=temperature_val,
188
+ cfg=cfg_val,
189
+ client_name=username
190
  )
video_handler.py CHANGED
@@ -32,6 +32,7 @@ def generate_video(
32
  num_inference_steps: int | None = None,
33
  guidance_scale: float | None = None,
34
  seed: int | None = None,
 
35
  ):
36
  """
37
  Generate a video using the specified model and provider through HF-Inferoxy.
@@ -93,7 +94,7 @@ def generate_video(
93
 
94
  # Report successful token usage
95
  if token_id:
96
- report_token_status(token_id, "success", api_key=proxy_api_key)
97
 
98
  return video_output, format_success_message("Video generated", f"using {model_name} on {provider}")
99
 
@@ -101,21 +102,21 @@ def generate_video(
101
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
102
  print(f"🔌 Video connection error: {error_msg}")
103
  if token_id:
104
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
105
  return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
106
 
107
  except TimeoutError as e:
108
  error_msg = f"Video generation timed out: {str(e)}"
109
  print(f"⏰ Video timeout: {error_msg}")
110
  if token_id:
111
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
112
  return None, format_error_message("Timeout Error", f"Video generation took too long (>{VIDEO_GENERATION_TIMEOUT//60} minutes). Try a shorter prompt.")
113
 
114
  except HfHubHTTPError as e:
115
  error_msg = str(e)
116
  print(f"πŸ€— Video HF error: {error_msg}")
117
  if token_id:
118
- report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
119
  if "401" in error_msg:
120
  return None, format_error_message("Authentication Error", "Invalid or expired API token. The proxy will provide a new token on retry.")
121
  elif "402" in error_msg:
@@ -141,7 +142,7 @@ def handle_video_generation(prompt_val, model_val, provider_val, steps_val, guid
141
  return None, format_error_message("Validation Error", "Please enter a prompt for video generation")
142
 
143
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
144
- is_allowed, access_msg, _username, _matched = check_org_access(access_token)
145
  if not is_allowed:
146
  return None, format_access_denied_message(access_msg)
147
 
@@ -152,6 +153,7 @@ def handle_video_generation(prompt_val, model_val, provider_val, steps_val, guid
152
  num_inference_steps=steps_val if steps_val is not None else None,
153
  guidance_scale=guidance_val if guidance_val is not None else None,
154
  seed=seed_val if seed_val is not None else None,
 
155
  )
156
 
157
 
 
32
  num_inference_steps: int | None = None,
33
  guidance_scale: float | None = None,
34
  seed: int | None = None,
35
+ client_name: str | None = None,
36
  ):
37
  """
38
  Generate a video using the specified model and provider through HF-Inferoxy.
 
94
 
95
  # Report successful token usage
96
  if token_id:
97
+ report_token_status(token_id, "success", api_key=proxy_api_key, client_name=client_name)
98
 
99
  return video_output, format_success_message("Video generated", f"using {model_name} on {provider}")
100
 
 
102
  error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
103
  print(f"🔌 Video connection error: {error_msg}")
104
  if token_id:
105
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
106
  return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
107
 
108
  except TimeoutError as e:
109
  error_msg = f"Video generation timed out: {str(e)}"
110
  print(f"⏰ Video timeout: {error_msg}")
111
  if token_id:
112
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
113
  return None, format_error_message("Timeout Error", f"Video generation took too long (>{VIDEO_GENERATION_TIMEOUT//60} minutes). Try a shorter prompt.")
114
 
115
  except HfHubHTTPError as e:
116
  error_msg = str(e)
117
  print(f"πŸ€— Video HF error: {error_msg}")
118
  if token_id:
119
+ report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
120
  if "401" in error_msg:
121
  return None, format_error_message("Authentication Error", "Invalid or expired API token. The proxy will provide a new token on retry.")
122
  elif "402" in error_msg:
 
142
  return None, format_error_message("Validation Error", "Please enter a prompt for video generation")
143
 
144
  access_token = getattr(hf_token, "token", None) if hf_token is not None else None
145
+ is_allowed, access_msg, username, _matched = check_org_access(access_token)
146
  if not is_allowed:
147
  return None, format_access_denied_message(access_msg)
148
 
 
153
  num_inference_steps=steps_val if steps_val is not None else None,
154
  guidance_scale=guidance_val if guidance_val is not None else None,
155
  seed=seed_val if seed_val is not None else None,
156
+ client_name=username,
157
  )
158
 
159