Update app.py
app.py CHANGED
@@ -2,13 +2,12 @@ import gradio as gr
 import torch
 from transformers import pipeline
 import os
-from huggingface_hub import login
 
 # --- App Configuration ---
 TITLE = "✍️ AI Story Outliner"
 DESCRIPTION = """
-Enter a prompt and get 10 unique story outlines from a
-The app uses **
+Enter a prompt and get 10 unique story outlines from a CPU-friendly AI model.
+The app uses **DistilGPT-2**, a reliable and lightweight model, to generate creative outlines.
 
 **How it works:**
 1. Enter your story idea.
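Note for reviewers: the Gradio wiring itself is outside this diff. As context for the description above, here is a minimal sketch of how generate_stories could be hooked up to one prompt box and ten outline boxes; the layout and widget names are assumptions, not the Space's actual UI code.

import gradio as gr

# Hypothetical wiring, for illustration only; the real UI code is not shown in this diff.
with gr.Blocks(title=TITLE) as demo:
    gr.Markdown(f"# {TITLE}")
    gr.Markdown(DESCRIPTION)
    prompt_box = gr.Textbox(label="Your story idea")
    generate_btn = gr.Button("Generate outlines")
    outline_boxes = [gr.Textbox(label=f"Outline {i + 1}") for i in range(10)]
    generate_btn.click(fn=generate_stories, inputs=prompt_box, outputs=outline_boxes)

demo.launch()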
@@ -26,35 +25,22 @@ examples = [
 ]
 
 # --- Model Initialization ---
-# This section loads
-# It will automatically use the HF_TOKEN secret when deployed on Hugging Face Spaces.
+# This section loads a smaller, stable, and CPU-friendly model that requires no authentication.
 generator = None
 model_error = None
 
 try:
     print("Initializing model... This may take a moment.")
 
-    #
-
-
-    if hf_token:
-        print("✅ HF_TOKEN secret found. Logging in...")
-        # Programmatically log in to Hugging Face. This is a more robust method.
-        login(token=hf_token)
-        print("✅ Login successful.")
-    else:
-        # If no token is found, raise an error to prevent the app from crashing later.
-        raise ValueError("Hugging Face token not found. Please set the HF_TOKEN secret in your Space settings.")
-
-    # Using 'mistralai/Mistral-7B-v0.1'.
-    # After login(), the token argument is no longer needed here as the session is authenticated.
+    # Using 'distilgpt2', a stable and widely supported model that does not require a token.
+    # This is much more suitable for a standard CPU environment.
     generator = pipeline(
         "text-generation",
-        model="
-        torch_dtype=torch.
+        model="distilgpt2",
+        torch_dtype=torch.float32, # Use float32 for wider CPU compatibility
         device_map="auto" # Will use GPU if available, otherwise CPU
     )
-    print("✅
+    print("✅ distilgpt2 model loaded successfully!")
 
 except Exception as e:
     model_error = e
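Reviewer note: replacing the gated Mistral checkpoint and the login() flow with the ungated distilgpt2 removes the whole HF_TOKEN failure path. The new initialization can be smoke-tested outside the Space; a minimal sketch, assuming transformers, torch, and accelerate (needed for device_map="auto") are installed:

import torch
from transformers import pipeline

# distilgpt2 is a public, ungated checkpoint: no token or login() required.
generator = pipeline(
    "text-generation",
    model="distilgpt2",
    torch_dtype=torch.float32,  # float32 for wide CPU compatibility
    device_map="auto",          # GPU if available, otherwise CPU
)

# Quick smoke test of the loaded pipeline.
result = generator("Once upon a time", max_new_tokens=20, do_sample=True)
print(result[0]["generated_text"])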
@@ -80,19 +66,30 @@ def generate_stories(prompt: str) -> list[str]:
         return [""] * 10
 
     try:
-        #
-        story_prompt = f"""
-
+        # A generic story prompt that works well with models like GPT-2.
+        story_prompt = f"""
+Story Idea: "{prompt}"
+
+Create a short story outline based on this idea.
+
 ### 🎬 The Hook
+A dramatic opening.
+
+### 🎼 The Ballad
+The main story, told concisely.
+
+### 🔚 The Finale
+A clear and satisfying ending.
+---
 """
 
         # Parameters for the pipeline to generate 10 diverse results.
         params = {
-            "max_new_tokens":
+            "max_new_tokens": 200,
             "num_return_sequences": 10,
             "do_sample": True,
-            "temperature": 0.
-            "
+            "temperature": 0.9,
+            "top_k": 50,
             "pad_token_id": generator.tokenizer.eos_token_id
         }
 
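These parameters lean on the text-generation pipeline's sampling interface: num_return_sequences=10 produces ten independent samples in a single call, while temperature and top_k trade coherence for diversity. A short sketch of the call these params feed into; the variable names come from the diff, and the output shape follows the pipeline's documented behavior.

# One call, ten sampled continuations; each element is a dict whose
# 'generated_text' holds the prompt followed by a sampled continuation.
outputs = generator(story_prompt, **params)

print(len(outputs))  # 10, one entry per returned sequence
for out in outputs[:2]:
    print(out["generated_text"][:100])  # preview the first two samples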
@@ -104,11 +101,8 @@ The outline must have three parts: a dramatic hook, a concise ballad, and a sati
         # Extract the generated text.
         stories = []
         for out in outputs:
-            # The model will generate the prompt plus the continuation. We extract just the new part.
             full_text = out['generated_text']
-
-            generated_part = full_text.split("[/INST]")[-1].strip()
-            stories.append(generated_part)
+            stories.append(full_text)
 
         # Ensure we return exactly 10 stories, padding if necessary.
         while len(stories) < 10:
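Reviewer note: dropping the split("[/INST]") step is correct for DistilGPT-2, which has no instruction markers; the pipeline returns the prompt followed by the continuation, and the file now keeps that full text. If only the continuation were wanted, one option (an assumption, not in app.py) is to strip the known prompt prefix before padding the list to exactly ten entries; the pipeline's return_full_text=False flag would avoid the echo altogether.

# Hypothetical helpers, for illustration only; app.py keeps the full generated text.
def strip_prompt(full_text: str, prompt_text: str) -> str:
    # GPT-2-style pipelines return prompt + continuation by default.
    if full_text.startswith(prompt_text):
        return full_text[len(prompt_text):].lstrip()
    return full_text

def pad_to_ten(stories: list[str]) -> list[str]:
    # Mirrors the file's intent: always return exactly 10 outlines.
    stories = stories[:10]
    while len(stories) < 10:
        stories.append("")
    return stories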