Spaces:
Sleeping
Sleeping
some
Browse files- app.py +34 -11
- chatbot_page.py +27 -10
app.py
CHANGED
|
@@ -102,6 +102,16 @@ def analyze_and_update_single_repo(repo_id: str) -> Tuple[str, str, pd.DataFrame
|
|
| 102 |
error_summary = f"Error analyzing repo: {e}"
|
| 103 |
return "", error_summary, read_csv_to_dataframe()
|
| 104 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
# --- Gradio UI ---
|
| 106 |
|
| 107 |
def create_ui() -> gr.Blocks:
|
|
@@ -156,7 +166,7 @@ def create_ui() -> gr.Blocks:
|
|
| 156 |
with gr.TabItem("3. Find Repos with AI", id="chatbot_tab"):
|
| 157 |
gr.Markdown("## Chat with an Assistant to Find Repositories")
|
| 158 |
chatbot = gr.Chatbot(
|
| 159 |
-
value=[
|
| 160 |
label="Chat with Assistant",
|
| 161 |
height=400,
|
| 162 |
type="messages"
|
|
@@ -218,23 +228,36 @@ def create_ui() -> gr.Blocks:
|
|
| 218 |
|
| 219 |
return content, summary, df, next_idx, status
|
| 220 |
|
| 221 |
-
def handle_user_message(user_message: str, history: List[
|
| 222 |
-
"""
|
| 223 |
-
|
|
|
|
| 224 |
return history, ""
|
| 225 |
|
| 226 |
-
def handle_bot_response(history: List[
|
| 227 |
-
"""Generates and
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 231 |
return history
|
| 232 |
|
| 233 |
-
def handle_end_chat(history: List[
|
| 234 |
"""Ends the chat and extracts keywords from the conversation."""
|
| 235 |
if not history:
|
| 236 |
return "", "Status: Chat is empty, nothing to analyze."
|
| 237 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 238 |
status = "Status: Keywords extracted. You can now use them to search."
|
| 239 |
return keywords_str, status
|
| 240 |
|
|
|
|
| 102 |
error_summary = f"Error analyzing repo: {e}"
|
| 103 |
return "", error_summary, read_csv_to_dataframe()
|
| 104 |
|
| 105 |
+
# --- NEW: Helper for Chat History Conversion ---
def convert_messages_to_tuples(history: List[Dict[str, str]]) -> List[Tuple[str, str]]:
    """Convert Gradio 'messages'-format history into (user, assistant) tuples.

    Gradio's ``type="messages"`` Chatbot stores history as a list of
    ``{"role": ..., "content": ...}`` dicts, while the legacy chat helpers
    used here expect ``(user_text, assistant_text)`` pairs.

    The previous implementation paired messages strictly at even indices,
    which silently returned an empty list whenever the history started with
    the assistant's greeting (as this UI's does) — dropping all context.
    Instead, pair each user message with the first assistant message that
    follows it; unpaired messages are skipped.

    Args:
        history: Chat history in Gradio 'messages' format.

    Returns:
        A list of (user_content, assistant_content) tuples, one per
        completed conversational turn.
    """
    tuple_history = []
    pending_user = None
    for message in history:
        role = message.get("role")
        if role == "user":
            # A newer user turn supersedes any unanswered one; only
            # completed user->assistant pairs are kept, as before.
            pending_user = message.get("content", "")
        elif role == "assistant" and pending_user is not None:
            tuple_history.append((pending_user, message.get("content", "")))
            pending_user = None
    return tuple_history
|
| 114 |
+
|
| 115 |
# --- Gradio UI ---
|
| 116 |
|
| 117 |
def create_ui() -> gr.Blocks:
|
|
|
|
| 166 |
with gr.TabItem("3. Find Repos with AI", id="chatbot_tab"):
|
| 167 |
gr.Markdown("## Chat with an Assistant to Find Repositories")
|
| 168 |
chatbot = gr.Chatbot(
|
| 169 |
+
value=[{"role": "assistant", "content": CHATBOT_INITIAL_MESSAGE}],
|
| 170 |
label="Chat with Assistant",
|
| 171 |
height=400,
|
| 172 |
type="messages"
|
|
|
|
| 228 |
|
| 229 |
return content, summary, df, next_idx, status
|
| 230 |
|
| 231 |
+
def handle_user_message(user_message: str, history: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], str]:
    """Record the user's submitted text in the chat history.

    Empty submissions are ignored.  The second return value is always an
    empty string, used to clear the textbox that triggered this handler.

    Args:
        user_message: Raw text the user typed.
        history: Chat history in Gradio 'messages' format (mutated in place).

    Returns:
        The updated history and an empty string for the input box.
    """
    if not user_message:
        return history, ""
    history.append({"role": "user", "content": user_message})
    return history, ""
|
| 236 |
|
| 237 |
+
def handle_bot_response(history: List[Dict[str, str]]) -> List[Dict[str, str]]:
    """Generate the assistant's reply to the most recent user message.

    A no-op unless the history ends with a user turn.  Earlier turns are
    converted to the legacy (user, assistant) tuple format expected by
    ``chat_with_user``; the reply is appended to the history in place.

    Args:
        history: Chat history in Gradio 'messages' format.

    Returns:
        The history, with the assistant's reply appended when one was made.
    """
    ends_with_user = bool(history) and history[-1]["role"] == "user"
    if not ends_with_user:
        return history

    latest_prompt = history[-1]["content"]
    # Only the turns *before* the latest prompt are passed as context.
    prior_turns = convert_messages_to_tuples(history[:-1])

    reply = chat_with_user(latest_prompt, prior_turns, CHATBOT_SYSTEM_PROMPT)
    history.append({"role": "assistant", "content": reply})
    return history
|
| 249 |
|
| 250 |
+
def handle_end_chat(history: List[Dict[str, str]]) -> Tuple[str, str]:
    """Finish the conversation and derive search keywords from it.

    Args:
        history: Chat history in Gradio 'messages' format.

    Returns:
        A (keywords, status) pair; keywords is an empty string whenever
        there is nothing to analyze.
    """
    if not history:
        return "", "Status: Chat is empty, nothing to analyze."

    # The extraction helper expects the legacy (user, assistant) tuple format.
    completed_turns = convert_messages_to_tuples(history)
    if not completed_turns:
        return "", "Status: No completed conversations to analyze."

    extracted = extract_keywords_from_conversation(completed_turns)
    return extracted, "Status: Keywords extracted. You can now use them to search."
|
| 263 |
|
chatbot_page.py
CHANGED
|
@@ -69,29 +69,46 @@ def extract_keywords_from_conversation(history):
|
|
| 69 |
|
| 70 |
with gr.Blocks() as chatbot_demo:
|
| 71 |
gr.Markdown("## Repo Recommendation Chatbot")
|
| 72 |
-
chatbot = gr.Chatbot()
|
| 73 |
# Initial assistant message only
|
| 74 |
initial_message = "Hello! Please tell me about your ideal Hugging Face repo. What use case, preferred language, or features are you looking for?"
|
| 75 |
-
state = gr.State([
|
| 76 |
user_input = gr.Textbox(label="Your message", placeholder="Describe your ideal repo or answer the assistant's questions...")
|
| 77 |
send_btn = gr.Button("Send")
|
| 78 |
end_btn = gr.Button("End Chat and Extract Keywords")
|
| 79 |
keywords_output = gr.Textbox(label="Extracted Keywords for Repo Search", interactive=False)
|
| 80 |
|
| 81 |
-
def user_send(user_message,
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
|
| 86 |
-
|
| 87 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
return keywords
|
| 89 |
|
| 90 |
# Reset state to initial message when chatbot page is loaded
|
| 91 |
def reset_chat_state():
|
| 92 |
-
return [
|
| 93 |
|
| 94 |
-
send_btn.click(user_send, inputs=[user_input, state], outputs=[chatbot,
|
| 95 |
end_btn.click(end_chat, inputs=state, outputs=keywords_output)
|
| 96 |
chatbot_demo.load(reset_chat_state, inputs=None, outputs=state)
|
| 97 |
|
|
|
|
| 69 |
|
| 70 |
with gr.Blocks() as chatbot_demo:
|
| 71 |
gr.Markdown("## Repo Recommendation Chatbot")
|
| 72 |
+
chatbot = gr.Chatbot(type="messages", label="Chatbot")
|
| 73 |
# Initial assistant message only
|
| 74 |
initial_message = "Hello! Please tell me about your ideal Hugging Face repo. What use case, preferred language, or features are you looking for?"
|
| 75 |
+
state = gr.State([{"role": "assistant", "content": initial_message}])
|
| 76 |
user_input = gr.Textbox(label="Your message", placeholder="Describe your ideal repo or answer the assistant's questions...")
|
| 77 |
send_btn = gr.Button("Send")
|
| 78 |
end_btn = gr.Button("End Chat and Extract Keywords")
|
| 79 |
keywords_output = gr.Textbox(label="Extracted Keywords for Repo Search", interactive=False)
|
| 80 |
|
| 81 |
+
def user_send(user_message, history_messages):
    """Handle a user turn: append the message, query the bot, append the reply.

    Args:
        user_message: Text submitted by the user.
        history_messages: Chat history in Gradio 'messages' format
            (list of ``{"role", "content"}`` dicts); mutated in place.

    Returns:
        The updated history and an empty string to clear the textbox.
    """
    # Add user message to the UI
    history_messages.append({"role": "user", "content": user_message})

    # Convert prior turns to the (user, assistant) tuple format the API
    # expects.  Pair by role rather than by even index: the history is
    # seeded with the assistant's greeting, so index-based pairing would
    # have swapped the roles in every tuple.
    tuple_history = []
    pending_user = None
    for msg in history_messages[:-1]:  # exclude the message just sent
        if msg["role"] == "user":
            pending_user = msg["content"]
        elif msg["role"] == "assistant" and pending_user is not None:
            tuple_history.append((pending_user, msg["content"]))
            pending_user = None

    # Get bot response and add to UI
    assistant_reply = chat_with_user(user_message, tuple_history)
    history_messages.append({"role": "assistant", "content": assistant_reply})

    return history_messages, ""
|
| 96 |
+
|
| 97 |
+
def end_chat(history_messages):
    """Extract repo-search keywords from the finished conversation.

    Args:
        history_messages: Chat history in Gradio 'messages' format.

    Returns:
        The keyword string produced by ``extract_keywords_from_conversation``.
    """
    # Convert to the (user, assistant) tuple format the extraction helper
    # expects.  Pair by role rather than by even index: the history begins
    # with the assistant's greeting, so index-based pairing would have
    # produced tuples with the roles swapped.
    tuple_history = []
    pending_user = None
    for msg in history_messages:
        if msg["role"] == "user":
            pending_user = msg["content"]
        elif msg["role"] == "assistant" and pending_user is not None:
            tuple_history.append((pending_user, msg["content"]))
            pending_user = None

    keywords = extract_keywords_from_conversation(tuple_history)
    return keywords
|
| 106 |
|
| 107 |
# Reset state to initial message when chatbot page is loaded
|
| 108 |
def reset_chat_state():
    """Return a fresh history holding only the assistant's greeting.

    A new list is built on every call so a reloaded page never shares
    state with a previous session.
    """
    return [dict(role="assistant", content=initial_message)]
|
| 110 |
|
| 111 |
+
send_btn.click(user_send, inputs=[user_input, state], outputs=[chatbot, user_input])
|
| 112 |
end_btn.click(end_chat, inputs=state, outputs=keywords_output)
|
| 113 |
chatbot_demo.load(reset_chat_state, inputs=None, outputs=state)
|
| 114 |
|