Update app_low.py
app_low.py (+31 -16)
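This change rewrites extract_later_part to clean the model output with regular expressions (stripping <end_of_turn>-style template tags, collapsing whitespace, and removing the echoed prompt), adds debug prints of the raw and cleaned generations in enhance_prompt1, guards against a None chat_history, and adds an import of re above the function.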
@@ -52,38 +52,53 @@ def enhance_prompt(user_prompt, temperature, max_tokens, chat_history):
 
     return chat_history
 
+import re
+
 def extract_later_part(user_prompt, generated_text):
     """
-
-
+    Cleans the model output and extracts only the enhanced (later) portion.
+    Removes prompt echoes and system tags like <end_of_turn>, <start_of_turn>, etc.
     """
-    #
+    # Step 1: Clean up internal tags
+    cleaned = re.sub(r"<.*?>", "", generated_text)  # Remove <end_of_turn>, <start_of_turn>, etc.
+    cleaned = cleaned.strip()
+
+    # Step 2: Normalize spaces
+    cleaned = re.sub(r"\s+", " ", cleaned)
+
+    # Step 3: Try removing the original prompt if repeated
     user_prompt_clean = user_prompt.strip().lower()
-
+    cleaned_lower = cleaned.lower()
+
+    if cleaned_lower.startswith(user_prompt_clean):
+        cleaned = cleaned[len(user_prompt):].strip(",. ").strip()
+
+    return cleaned
 
-    if gen_clean.lower().startswith(user_prompt_clean):
-        return gen_clean[len(user_prompt_clean):].strip(", ").strip()
-    return gen_clean
 
 # ===================== Prompt Enhancer Function =====================
 def enhance_prompt1(user_prompt, temperature, max_tokens, chat_history):
-    # Build the messages for the model
     messages = [
         {"role": "system", "content": "Enhance and expand the following prompt with more details and context:"},
         {"role": "user", "content": user_prompt}
     ]
-
-    # Generate model prompt
+
     prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
     output = pipe(prompt, max_new_tokens=256)
-
-
-
-
-    chat_history.append({"role": "user", "content": user_prompt})
-    chat_history.append({"role": "assistant", "content": later_part})
+    raw_output = output[0]['generated_text']
+
+    print("=== RAW MODEL OUTPUT ===")
+    print(raw_output)
 
+    # Extract the cleaned, later portion
+    later_part = extract_later_part(user_prompt, raw_output)
+    print("=== EXTRACTED CLEANED OUTPUT ===")
+    print(later_part)
 
+    # Append to chat history for Gradio
+    chat_history = chat_history or []
+    chat_history.append({"role": "user", "content": user_prompt})
+    chat_history.append({"role": "assistant", "content": later_part})
 
     return chat_history
 
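A note on the new extract_later_part: the startswith() check compares against user_prompt_clean (stripped and lowercased), while the slice that removes the echo uses len(user_prompt). If the prompt carries leading or trailing whitespace the slice cuts into the enhanced text, and internal double spaces make the startswith() check miss entirely, since step 2 collapsed them in the output but not in user_prompt_clean. A minimal whitespace-safe variant is sketched below; the name extract_later_part_safe is hypothetical and not part of this commit.

import re

def extract_later_part_safe(user_prompt, generated_text):
    # Same cleanup as the committed version: drop <...> tags, collapse whitespace.
    cleaned = re.sub(r"<.*?>", "", generated_text)
    cleaned = re.sub(r"\s+", " ", cleaned).strip()

    # Normalize the prompt the same way before measuring it, so the slice
    # length matches the echoed prefix actually tested with startswith().
    prompt_norm = re.sub(r"\s+", " ", user_prompt).strip()
    if cleaned.lower().startswith(prompt_norm.lower()):
        cleaned = cleaned[len(prompt_norm):].strip(",. ").strip()
    return cleaned

# Example with an assumed Gemma-style echo in the raw output:
raw = "A cat,<end_of_turn> a fluffy ginger cat lounging in golden sunlight."
print(extract_later_part_safe("  a cat ", raw))
# -> "a fluffy ginger cat lounging in golden sunlight"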
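The hunk calls tokenizer and pipe, which are defined elsewhere in app_low.py. For context, a setup along these lines is assumed; the checkpoint name is a guess prompted by the Gemma-style <start_of_turn>/<end_of_turn> tags and is not shown in this diff.

from transformers import AutoTokenizer, pipeline

# Assumption: an instruction-tuned chat model with a chat template; the
# <start_of_turn> markers suggest a Gemma-family checkpoint.
model_id = "google/gemma-3-1b-it"
tokenizer = AutoTokenizer.from_pretrained(model_id)
pipe = pipeline("text-generation", model=model_id, tokenizer=tokenizer)

Separately, enhance_prompt1 accepts temperature and max_tokens but passes neither to the pipeline; max_new_tokens stays hardcoded at 256. Wiring them through would look like pipe(prompt, max_new_tokens=max_tokens, temperature=temperature, do_sample=temperature > 0).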
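The message-dict history format the function appends to matches Gradio's gr.Chatbot(type="messages"). Below is a minimal sketch of the UI wiring enhance_prompt1 could plug into; widget names and layout are assumptions, since the actual interface is outside this hunk.

import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")  # consumes [{"role": ..., "content": ...}]
    prompt_box = gr.Textbox(label="Prompt to enhance")
    temperature = gr.Slider(0.0, 1.5, value=0.7, label="Temperature")
    max_tokens = gr.Slider(16, 1024, value=256, step=16, label="Max tokens")
    prompt_box.submit(
        enhance_prompt1,
        inputs=[prompt_box, temperature, max_tokens, chatbot],
        outputs=chatbot,
    )

demo.launch()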