"""
Core functionality of SlideDeck AI.
"""
import logging
import os
import pathlib
import tempfile
from typing import Union, Any
import json5
from dotenv import load_dotenv
from . import global_config as gcfg
from .global_config import GlobalConfig
from .helpers import file_manager as filem
from .helpers import llm_helper, pptx_helper, text_helper
from .helpers.chat_helper import ChatMessageHistory
load_dotenv()
RUN_IN_OFFLINE_MODE = os.getenv('RUN_IN_OFFLINE_MODE', 'False').lower() == 'true'
VALID_MODEL_NAMES = list(GlobalConfig.VALID_MODELS.keys())
VALID_TEMPLATE_NAMES = list(GlobalConfig.PPTX_TEMPLATE_FILES.keys())
logger = logging.getLogger(__name__)
def _process_llm_chunk(chunk: Any) -> str:
"""
Helper function to process LLM response chunks consistently.
Args:
chunk: The chunk received from the LLM stream.
Returns:
The processed text from the chunk.
"""
if isinstance(chunk, str):
return chunk
content = getattr(chunk, 'content', None)
return content if content is not None else str(chunk)
def _stream_llm_response(llm: Any, prompt: str, progress_callback=None) -> str:
"""
Helper function to stream LLM responses with consistent handling.
Args:
llm: The LLM instance to use for generating responses.
prompt: The prompt to send to the LLM.
        progress_callback: Optional callback invoked with the cumulative number of
            characters received so far.
Returns:
The complete response from the LLM.
Raises:
        RuntimeError: If an error occurs while getting a response from the LLM.
"""
response = ''
try:
for chunk in llm.stream(prompt):
chunk_text = _process_llm_chunk(chunk)
response += chunk_text
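            # Report cumulative progress: total characters received so far.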
if progress_callback:
progress_callback(len(response))
return response
except Exception as e:
logger.error('Error streaming LLM response: %s', str(e))
raise RuntimeError(f'Failed to get response from LLM: {str(e)}') from e
class SlideDeckAI:
"""
The main class for generating slide decks.
"""
def __init__(
self,
model: str,
topic: str,
        api_key: str | None = None,
pdf_path_or_stream=None,
pdf_page_range=None,
template_idx: int = 0
):
"""
Initialize the SlideDeckAI object.
Args:
model: The name of the LLM model to use.
topic: The topic of the slide deck.
api_key: The API key for the LLM provider.
pdf_path_or_stream: The path to a PDF file or a file-like object.
pdf_page_range: A tuple representing the page range to use from the PDF file.
template_idx: The index of the PowerPoint template to use.
Raises:
ValueError: If the model name is not in VALID_MODELS.
"""
if model not in GlobalConfig.VALID_MODELS:
raise ValueError(
f'Invalid model name: {model}.'
f' Must be one of: {", ".join(VALID_MODEL_NAMES)}.'
)
self.model: str = model
self.topic: str = topic
        self.api_key: str | None = api_key
self.pdf_path_or_stream = pdf_path_or_stream
self.pdf_page_range = pdf_page_range
# Validate template_idx is within valid range
num_templates = len(GlobalConfig.PPTX_TEMPLATE_FILES)
self.template_idx: int = template_idx if 0 <= template_idx < num_templates else 0
self.chat_history = ChatMessageHistory()
self.last_response = None
logger.info('Using model: %s', model)
def _initialize_llm(self):
"""
Initialize and return an LLM instance with the current configuration.
Returns:
Configured LLM instance.
"""
provider, llm_name = llm_helper.get_provider_model(
self.model,
use_ollama=RUN_IN_OFFLINE_MODE
)
return llm_helper.get_litellm_llm(
provider=provider,
model=llm_name,
max_new_tokens=gcfg.get_max_output_tokens(self.model),
api_key=self.api_key,
)
def _get_prompt_template(self, is_refinement: bool) -> str:
"""
Return a prompt template.
Args:
            is_refinement: Whether to return the refinement prompt (True) or the
                initial generation prompt (False).
Returns:
            The prompt template as a string, to be filled in with str.format().
"""
        template_path = (
            GlobalConfig.REFINEMENT_PROMPT_TEMPLATE if is_refinement
            else GlobalConfig.INITIAL_PROMPT_TEMPLATE
        )
        with open(template_path, 'r', encoding='utf-8') as in_file:
            return in_file.read()
def generate(self, progress_callback=None):
"""
Generate the initial slide deck.
Args:
progress_callback: Optional callback function to report progress.
Returns:
The path to the generated .pptx file.
"""
additional_info = ''
if self.pdf_path_or_stream:
additional_info = filem.get_pdf_contents(self.pdf_path_or_stream, self.pdf_page_range)
self.chat_history.add_user_message(self.topic)
prompt_template = self._get_prompt_template(is_refinement=False)
formatted_template = prompt_template.format(
question=self.topic,
additional_info=additional_info
)
llm = self._initialize_llm()
response = _stream_llm_response(llm, formatted_template, progress_callback)
self.last_response = text_helper.get_clean_json(response)
self.chat_history.add_ai_message(self.last_response)
return self._generate_slide_deck(self.last_response)
def revise(self, instructions, progress_callback=None):
"""
Revise the slide deck with new instructions.
Args:
instructions: The instructions for revising the slide deck.
progress_callback: Optional callback function to report progress.
Returns:
The path to the revised .pptx file.
Raises:
ValueError: If no slide deck exists or chat history is full.
"""
if not self.last_response:
raise ValueError('You must generate a slide deck before you can revise it.')
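        # Each generate()/revise() call appends one user and one AI message, so this
        # cap allows the initial generation plus seven rounds of revision.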
if len(self.chat_history.messages) >= 16:
raise ValueError('Chat history is full. Please reset to continue.')
self.chat_history.add_user_message(instructions)
prompt_template = self._get_prompt_template(is_refinement=True)
list_of_msgs = [
f'{idx + 1}. {msg.content}'
for idx, msg in enumerate(self.chat_history.messages) if msg.role == 'user'
]
additional_info = ''
if self.pdf_path_or_stream:
additional_info = filem.get_pdf_contents(self.pdf_path_or_stream, self.pdf_page_range)
formatted_template = prompt_template.format(
instructions='\n'.join(list_of_msgs),
previous_content=self.last_response,
additional_info=additional_info,
)
llm = self._initialize_llm()
response = _stream_llm_response(llm, formatted_template, progress_callback)
self.last_response = text_helper.get_clean_json(response)
self.chat_history.add_ai_message(self.last_response)
return self._generate_slide_deck(self.last_response)
def _generate_slide_deck(self, json_str: str) -> Union[pathlib.Path, None]:
"""
Create a slide deck and return the file path.
Args:
            json_str: The slide deck content as a JSON string (malformed input is
                repaired if possible).
Returns:
The path to the .pptx file or None in case of error.
"""
try:
parsed_data = json5.loads(json_str)
except (ValueError, RecursionError) as e:
logger.error('Error parsing JSON: %s', e)
try:
parsed_data = json5.loads(text_helper.fix_malformed_json(json_str))
except (ValueError, RecursionError) as e2:
logger.error('Error parsing fixed JSON: %s', e2)
return None
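        # delete=False keeps the .pptx on disk after close() so that its path can be
        # returned to the caller.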
temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
path = pathlib.Path(temp.name)
temp.close()
try:
pptx_helper.generate_powerpoint_presentation(
parsed_data,
slides_template=VALID_TEMPLATE_NAMES[self.template_idx],
output_file_path=path
)
except Exception as ex:
            logger.exception('Error while creating the PowerPoint file: %s', str(ex))
return None
return path
def set_model(self, model_name: str, api_key: str | None = None):
"""
Set the LLM model (and API key) to use.
Args:
model_name: The name of the model to use.
api_key: The API key for the LLM provider.
Raises:
ValueError: If the model name is not in VALID_MODELS.
"""
if model_name not in GlobalConfig.VALID_MODELS:
raise ValueError(
f'Invalid model name: {model_name}.'
f' Must be one of: {", ".join(VALID_MODEL_NAMES)}.'
)
self.model = model_name
if api_key:
self.api_key = api_key
logger.debug('Model set to: %s', model_name)
    def set_template(self, idx: int):
"""
Set the PowerPoint template to use.
Args:
            idx: The index of the template to use. Out-of-range values fall back to 0.
"""
num_templates = len(GlobalConfig.PPTX_TEMPLATE_FILES)
self.template_idx = idx if 0 <= idx < num_templates else 0
def reset(self):
"""
Reset the chat history and internal state.
"""
self.chat_history = ChatMessageHistory()
self.last_response = None
self.template_idx = 0
self.topic = ''
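

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): picks the first configured model and
    # reads an API key from an environment variable; the variable name used here,
    # LLM_PROVIDER_API_KEY, is an assumption and should be adapted to your setup.
    deck = SlideDeckAI(
        model=VALID_MODEL_NAMES[0],
        topic='A brief introduction to renewable energy',
        api_key=os.getenv('LLM_PROVIDER_API_KEY'),
    )
    pptx_path = deck.generate(
        progress_callback=lambda n: print(f'{n} characters received')
    )
    print(f'Slide deck written to: {pptx_path}')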