nazdridoy committed
Commit 9a48f85 · verified · 1 Parent(s): c40f3d0

feat(chat): add message retry functionality


- [feat] Add `handle_chat_retry` function to implement assistant response retry logic (chat_handler.py:196)
- [feat] Implement retry logic in `handle_chat_retry` to trim history and re-invoke `chat_respond` (chat_handler.py:handle_chat_retry())
- [feat] Modify `create_chat_tab` to accept `handle_chat_retry_fn` parameter (ui_components.py:create_chat_tab():14)
- [feat] Call `chatbot_display.retry()` with `handle_chat_retry_fn` when provided (ui_components.py:create_chat_tab():83)
- [feat] Import `handle_chat_retry` from `chat_handler` (app.py:5)
- [feat] Pass `handle_chat_retry` to `create_chat_tab` in `create_app` (app.py:create_app():29)
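
Before the diffs, here is a condensed, self-contained sketch of how the three changes fit together (assumes Gradio 5.x; the component names and the stub handler are illustrative stand-ins, the real retry logic lives in `chat_handler.handle_chat_retry`):

```python
import gradio as gr

def handle_chat_retry_stub(history, retry_data=None):
    # Stand-in for chat_handler.handle_chat_retry: replace the last assistant
    # message with a regenerated one.
    if not history:
        return history
    return history[:-1] + [{"role": "assistant", "content": "regenerated reply"}]

def create_chat_tab(handle_chat_retry_fn=None):
    # Mirrors ui_components.create_chat_tab: the retry event is only bound
    # when a handler is passed in.
    chatbot_display = gr.Chatbot(type="messages")
    # ... chat input and submit wiring elided ...
    if handle_chat_retry_fn is not None:
        chatbot_display.retry(
            fn=handle_chat_retry_fn,
            inputs=[chatbot_display],
            outputs=chatbot_display,
        )

with gr.Blocks() as app:          # mirrors app.create_app
    with gr.Tabs():
        with gr.Tab("Chat"):
            create_chat_tab(handle_chat_retry_stub)

app.launch()
```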

Files changed (3)
  1. app.py +2 -2
  2. chat_handler.py +62 -0
  3. ui_components.py +12 -1
app.py CHANGED
@@ -4,7 +4,7 @@ A comprehensive AI platform with chat and image generation capabilities.
 """
 
 import gradio as gr
-from chat_handler import handle_chat_submit
+from chat_handler import handle_chat_submit, handle_chat_retry
 from image_handler import handle_image_generation
 from ui_components import (
     create_main_header,
@@ -27,7 +27,7 @@ def create_app():
     with gr.Tabs() as tabs:
 
         # Chat tab
-        create_chat_tab(handle_chat_submit)
+        create_chat_tab(handle_chat_submit, handle_chat_retry)
 
         # Image generation tab
         create_image_tab(handle_image_generation)
chat_handler.py CHANGED
@@ -193,3 +193,65 @@ def handle_chat_submit(message, history, system_msg, model_name, max_tokens, tem
         # Update history with the current partial response and yield it
         current_history = history + [{"role": "assistant", "content": assistant_response}]
         yield current_history, ""
+
+
+def handle_chat_retry(history, system_msg, model_name, max_tokens, temperature, top_p, retry_data=None):
+    """
+    Retry the assistant response for the selected message.
+    Works with gr.Chatbot.retry() which provides retry_data.index for the message.
+    """
+    # Guard: empty history
+    if not history:
+        yield history
+        return
+
+    # Determine which assistant message index to retry
+    retry_index = None
+    try:
+        retry_index = getattr(retry_data, "index", None)
+    except Exception:
+        retry_index = None
+
+    if retry_index is None:
+        # Fallback to last assistant message
+        retry_index = len(history) - 1
+
+    # Trim history up to the message being retried (exclude that assistant msg)
+    trimmed_history = list(history[:retry_index])
+
+    # Find the most recent user message before retry_index
+    last_user_idx = None
+    for idx in range(retry_index - 1, -1, -1):
+        if trimmed_history[idx].get("role") == "user":
+            last_user_idx = idx
+            break
+
+    # Nothing to retry if no prior user message
+    if last_user_idx is None:
+        yield history
+        return
+
+    # Message to retry and prior conversation context (before that user msg)
+    message = trimmed_history[last_user_idx].get("content", "")
+    prior_history = trimmed_history[:last_user_idx]
+
+    if not message.strip():
+        yield history
+        return
+
+    # Stream a new assistant response
+    response_generator = chat_respond(
+        message,
+        prior_history,
+        system_msg,
+        model_name,
+        max_tokens,
+        temperature,
+        top_p
+    )
+
+    assistant_response = ""
+    for partial_response in response_generator:
+        assistant_response = partial_response
+        current_history = trimmed_history + [{"role": "assistant", "content": assistant_response}]
+        yield current_history
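
The index arithmetic in `handle_chat_retry` is easiest to see on a concrete history. The following self-contained snippet mirrors the trimming step above (it does not import the module, and the sample messages are made up):

```python
history = [
    {"role": "user", "content": "What is HF-Inferoxy?"},
    {"role": "assistant", "content": "A proxy layer for HF inference."},
    {"role": "user", "content": "How are tokens managed?"},
    {"role": "assistant", "content": "(reply being retried)"},
]

retry_index = 3                                # retry_data.index, or len(history) - 1 as a fallback
trimmed_history = list(history[:retry_index])  # drops the assistant message being retried

# Walk backwards to the most recent user message before retry_index
last_user_idx = next(
    idx for idx in range(retry_index - 1, -1, -1)
    if trimmed_history[idx]["role"] == "user"
)

message = trimmed_history[last_user_idx]["content"]  # "How are tokens managed?"
prior_history = trimmed_history[:last_user_idx]      # only the first user/assistant exchange

print(message)             # the prompt that gets re-sent to chat_respond
print(len(prior_history))  # 2 -> earlier turns kept as context
```

From here the handler re-invokes `chat_respond(message, prior_history, ...)` and yields `trimmed_history` plus the partial assistant reply on every streamed chunk, so the retried answer replaces the old one in place.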
ui_components.py CHANGED
@@ -11,7 +11,7 @@ from utils import (
 )
 
 
-def create_chat_tab(handle_chat_submit_fn):
+def create_chat_tab(handle_chat_submit_fn, handle_chat_retry_fn=None):
     """
     Create the chat tab interface.
     """
@@ -81,6 +81,15 @@ def create_chat_tab(handle_chat_submit_fn):
         outputs=[chatbot_display, chat_input]
     )
 
+    # Enable retry icon and bind handler if provided
+    if handle_chat_retry_fn is not None:
+        chatbot_display.retry(
+            fn=handle_chat_retry_fn,
+            inputs=[chatbot_display, chat_system_message, chat_model_name,
+                    chat_max_tokens, chat_temperature, chat_top_p],
+            outputs=chatbot_display
+        )
+
 
 def create_chat_tips():
     """Create the tips section for the chat tab."""
@@ -289,3 +298,5 @@ def create_footer():
 
     **Built with ❤️ using [HF-Inferoxy](https://nazdridoy.github.io/hf-inferoxy/) for intelligent token management**
     """)
+
+
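
A note on the binding above: `retry_data` is not listed in `inputs`. In recent Gradio releases, event payloads such as `gr.RetryData` are injected into handler parameters annotated with that type rather than passed through `inputs`; a minimal, standalone sketch of that pattern (assuming Gradio 5.x, with illustrative names) looks like:

```python
import gradio as gr

def on_retry(history, retry_data: gr.RetryData):
    # Gradio fills the gr.RetryData-annotated parameter from the retry event;
    # retry_data.index is the position of the message the retry was requested for.
    print("retry requested at index", retry_data.index)
    return history  # actual regeneration would follow handle_chat_retry above

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    chatbot.retry(on_retry, inputs=[chatbot], outputs=[chatbot])

demo.launch()
```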