barunsaha commited on
Commit
5eca302
·
unverified ·
2 Parent(s): 68e7bc4 f146e60

Merge pull request #152 from barun-saha/refactor-to-package-v2

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. MANIFEST.in +6 -0
  2. README.md +28 -0
  3. app.py +56 -199
  4. pyproject.toml +39 -0
  5. {helpers β†’ src/slidedeckai}/__init__.py +0 -0
  6. src/slidedeckai/_version.py +1 -0
  7. src/slidedeckai/cli.py +53 -0
  8. src/slidedeckai/core.py +208 -0
  9. {file_embeddings β†’ src/slidedeckai/file_embeddings}/embeddings.npy +0 -0
  10. {file_embeddings β†’ src/slidedeckai/file_embeddings}/icons.npy +0 -0
  11. global_config.py β†’ src/slidedeckai/global_config.py +13 -11
  12. src/slidedeckai/helpers/__init__.py +0 -0
  13. {helpers β†’ src/slidedeckai/helpers}/chat_helper.py +6 -13
  14. {helpers β†’ src/slidedeckai/helpers}/file_manager.py +1 -4
  15. {helpers β†’ src/slidedeckai/helpers}/icons_embeddings.py +3 -6
  16. {helpers β†’ src/slidedeckai/helpers}/image_search.py +0 -0
  17. {helpers β†’ src/slidedeckai/helpers}/llm_helper.py +1 -3
  18. {helpers β†’ src/slidedeckai/helpers}/pptx_helper.py +3 -6
  19. {helpers β†’ src/slidedeckai/helpers}/text_helper.py +0 -0
  20. {icons β†’ src/slidedeckai/icons}/png128/0-circle.png +0 -0
  21. {icons β†’ src/slidedeckai/icons}/png128/1-circle.png +0 -0
  22. {icons β†’ src/slidedeckai/icons}/png128/123.png +0 -0
  23. {icons β†’ src/slidedeckai/icons}/png128/2-circle.png +0 -0
  24. {icons β†’ src/slidedeckai/icons}/png128/3-circle.png +0 -0
  25. {icons β†’ src/slidedeckai/icons}/png128/4-circle.png +0 -0
  26. {icons β†’ src/slidedeckai/icons}/png128/5-circle.png +0 -0
  27. {icons β†’ src/slidedeckai/icons}/png128/6-circle.png +0 -0
  28. {icons β†’ src/slidedeckai/icons}/png128/7-circle.png +0 -0
  29. {icons β†’ src/slidedeckai/icons}/png128/8-circle.png +0 -0
  30. {icons β†’ src/slidedeckai/icons}/png128/9-circle.png +0 -0
  31. {icons β†’ src/slidedeckai/icons}/png128/activity.png +0 -0
  32. {icons β†’ src/slidedeckai/icons}/png128/airplane.png +0 -0
  33. {icons β†’ src/slidedeckai/icons}/png128/alarm.png +0 -0
  34. {icons β†’ src/slidedeckai/icons}/png128/alien-head.png +0 -0
  35. {icons β†’ src/slidedeckai/icons}/png128/alphabet.png +0 -0
  36. {icons β†’ src/slidedeckai/icons}/png128/amazon.png +0 -0
  37. {icons β†’ src/slidedeckai/icons}/png128/amritsar-golden-temple.png +0 -0
  38. {icons β†’ src/slidedeckai/icons}/png128/amsterdam-canal.png +0 -0
  39. {icons β†’ src/slidedeckai/icons}/png128/amsterdam-windmill.png +0 -0
  40. {icons β†’ src/slidedeckai/icons}/png128/android.png +0 -0
  41. {icons β†’ src/slidedeckai/icons}/png128/angkor-wat.png +0 -0
  42. {icons β†’ src/slidedeckai/icons}/png128/apple.png +0 -0
  43. {icons β†’ src/slidedeckai/icons}/png128/archive.png +0 -0
  44. {icons β†’ src/slidedeckai/icons}/png128/argentina-obelisk.png +0 -0
  45. {icons β†’ src/slidedeckai/icons}/png128/artificial-intelligence-brain.png +0 -0
  46. {icons β†’ src/slidedeckai/icons}/png128/atlanta.png +0 -0
  47. {icons β†’ src/slidedeckai/icons}/png128/austin.png +0 -0
  48. {icons β†’ src/slidedeckai/icons}/png128/automation-decision.png +0 -0
  49. {icons β†’ src/slidedeckai/icons}/png128/award.png +0 -0
  50. {icons β†’ src/slidedeckai/icons}/png128/balloon.png +0 -0
MANIFEST.in ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include src/slidedeckai/strings.json
2
+ recursive-include src/slidedeckai/prompts *.txt
3
+ recursive-include src/slidedeckai/pptx_templates *.pptx
4
+ recursive-include src/slidedeckai/icons *.png
5
+ recursive-include src/slidedeckai/icons *.txt
6
+ recursive-include src/slidedeckai/file_embeddings *.npy
README.md CHANGED
@@ -41,6 +41,34 @@ Clicking on the button will download the file.
41
  In addition, SlideDeck AI can also create a presentation based on PDF files.
42
 
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  ## Summary of the LLMs
45
 
46
  SlideDeck AI allows the use of different LLMs from several online providersβ€”Azure OpenAI, Google, Cohere, Together AI, and OpenRouter. Most of these service providers offer generous free usage of relevant LLMs without requiring any billing information.
 
41
  In addition, SlideDeck AI can also create a presentation based on PDF files.
42
 
43
 
44
+ ## Python API Usage
45
+
46
+ ```python
47
+ from slidedeckai import SlideDeckAI
48
+
49
+
50
+ slide_generator = SlideDeckAI(
51
+ model='[gg]gemini-2.5-flash-lite',
52
+ topic='Make a slide deck on AI',
53
+ api_key='your-google-api-key',
54
+ )
55
+ pptx_path = slide_generator.generate()
56
+ print(f"Generated slide deck: {pptx_path}")
57
+ ```
58
+
59
+ ## CLI Usage
60
+
61
+ Generate a new slide deck:
62
+ ```bash
63
+ slidedeckai generate --model '[gg]gemini-2.5-flash-lite' --topic 'Make a slide deck on AI' --api-key 'your-google-api-key'
64
+ ```
65
+
66
+ Launch the Streamlit app:
67
+ ```bash
68
+ slidedeckai launch
69
+ ```
70
+
71
+
72
  ## Summary of the LLMs
73
 
74
  SlideDeck AI allows the use of different LLMs from several online providersβ€”Azure OpenAI, Google, Cohere, Together AI, and OpenRouter. Most of these service providers offer generous free usage of relevant LLMs without requiring any billing information.
app.py CHANGED
@@ -6,6 +6,7 @@ import logging
6
  import os
7
  import pathlib
8
  import random
 
9
  import tempfile
10
  from typing import List, Union
11
 
@@ -17,13 +18,35 @@ import requests
17
  import streamlit as st
18
  from dotenv import load_dotenv
19
 
20
- import global_config as gcfg
21
- import helpers.file_manager as filem
22
- from global_config import GlobalConfig
23
- from helpers import chat_helper, llm_helper, pptx_helper, text_helper
 
 
 
 
24
 
25
  load_dotenv()
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  RUN_IN_OFFLINE_MODE = os.getenv('RUN_IN_OFFLINE_MODE', 'False').lower() == 'true'
28
 
29
 
@@ -309,7 +332,7 @@ def set_up_chat_ui():
309
  st.info(APP_TEXT['like_feedback'])
310
  st.chat_message('ai').write(random.choice(APP_TEXT['ai_greetings']))
311
 
312
- history = chat_helper.StreamlitChatMessageHistory(key=CHAT_MESSAGES)
313
  prompt_template = chat_helper.ChatPromptTemplate.from_template(
314
  _get_prompt_template(
315
  is_refinement=_is_it_refinement()
@@ -363,104 +386,39 @@ def set_up_chat_ui():
363
  f' {st.session_state["end_page"]} in {st.session_state["pdf_file"].name}'
364
  )
365
 
366
- # Get pdf contents
367
- st.session_state[ADDITIONAL_INFO] = filem.get_pdf_contents(
368
- st.session_state[PDF_FILE_KEY],
369
- (st.session_state['start_page'], st.session_state['end_page'])
370
- )
371
- provider, llm_name = llm_helper.get_provider_model(
372
- llm_provider_to_use,
373
- use_ollama=RUN_IN_OFFLINE_MODE
374
- )
375
-
376
- # Validate that provider and model were parsed successfully
377
- if not provider or not llm_name:
378
- handle_error(
379
- f'Failed to parse provider and model from: "{llm_provider_to_use}". '
380
- f'Please select a valid LLM from the dropdown.',
381
- True
382
- )
383
- return
384
-
385
- user_key = api_key_token.strip()
386
- az_deployment = azure_deployment.strip()
387
- az_endpoint = azure_endpoint.strip()
388
- api_ver = api_version.strip()
389
-
390
- if not are_all_inputs_valid(
391
- prompt_text, provider, llm_name, user_key,
392
- az_deployment, az_endpoint, api_ver
393
- ):
394
- return
395
-
396
- logger.info(
397
- 'User input: %s | #characters: %d | LLM: %s',
398
- prompt_text, len(prompt_text), llm_name
399
- )
400
  st.chat_message('user').write(prompt_text)
401
 
402
- if _is_it_refinement():
403
- user_messages = _get_user_messages()
404
- user_messages.append(prompt_text)
405
- list_of_msgs = [
406
- f'{idx + 1}. {msg}' for idx, msg in enumerate(user_messages)
407
- ]
408
- formatted_template = prompt_template.format(
409
- **{
410
- 'instructions': '\n'.join(list_of_msgs),
411
- 'previous_content': _get_last_response(),
412
- 'additional_info': st.session_state.get(ADDITIONAL_INFO, ''),
413
- }
414
- )
415
- else:
416
- formatted_template = prompt_template.format(
417
- **{
418
- 'question': prompt_text,
419
- 'additional_info': st.session_state.get(ADDITIONAL_INFO, ''),
420
- }
421
- )
422
 
423
  progress_bar = st.progress(0, 'Preparing to call LLM...')
424
- response = ''
 
 
425
 
426
  try:
427
- llm = llm_helper.get_litellm_llm(
428
- provider=provider,
429
- model=llm_name,
430
- max_new_tokens=gcfg.get_max_output_tokens(llm_provider_to_use),
431
- api_key=user_key,
432
- azure_endpoint_url=az_endpoint,
433
- azure_deployment_name=az_deployment,
434
- azure_api_version=api_ver,
435
- )
436
 
437
- if not llm:
438
- handle_error(
439
- 'Failed to create an LLM instance! Make sure that you have selected the'
440
- ' correct model from the dropdown list and have provided correct API key'
441
- ' or access token.',
442
- False
443
- )
444
- return
 
 
445
 
446
- for chunk in llm.stream(formatted_template):
447
- if isinstance(chunk, str):
448
- response += chunk
449
- else:
450
- content = getattr(chunk, 'content', None)
451
- if content is not None:
452
- response += content
453
- else:
454
- response += str(chunk)
455
-
456
- # Update the progress bar with an approx progress percentage
457
- progress_bar.progress(
458
- min(
459
- len(response) / gcfg.get_max_output_tokens(llm_provider_to_use),
460
- 0.95
461
- ),
462
- text='Streaming content...this might take a while...'
463
- )
464
  except (httpx.ConnectError, requests.exceptions.ConnectionError):
465
  handle_error(
466
  'A connection error occurred while streaming content from the LLM endpoint.'
@@ -469,22 +427,19 @@ def set_up_chat_ui():
469
  ' using Ollama, make sure that Ollama is already running on your system.',
470
  True
471
  )
472
- return
473
  except huggingface_hub.errors.ValidationError as ve:
474
  handle_error(
475
  f'An error occurred while trying to generate the content: {ve}'
476
  '\nPlease try again with a significantly shorter input text.',
477
  True
478
  )
479
- return
480
  except ollama.ResponseError:
481
  handle_error(
482
- f'The model `{llm_name}` is unavailable with Ollama on your system.'
483
- f' Make sure that you have provided the correct LLM name or pull it using'
484
- f' `ollama pull {llm_name}`. View LLMs available locally by running `ollama list`.',
485
  True
486
  )
487
- return
488
  except Exception as ex:
489
  _msg = str(ex)
490
  if 'payment required' in _msg.lower():
@@ -509,101 +464,6 @@ def set_up_chat_ui():
509
  ' Read **[how to get free LLM API keys](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)**.',
510
  True
511
  )
512
- return
513
-
514
- history.add_user_message(prompt_text)
515
- history.add_ai_message(response)
516
-
517
- # The content has been generated as JSON
518
- # There maybe trailing ``` at the end of the response -- remove them
519
- # To be careful: ``` may be part of the content as well when code is generated
520
- response = text_helper.get_clean_json(response)
521
- logger.info(
522
- 'Cleaned JSON length: %d', len(response)
523
- )
524
-
525
- # Now create the PPT file
526
- progress_bar.progress(
527
- GlobalConfig.LLM_PROGRESS_MAX,
528
- text='Finding photos online and generating the slide deck...'
529
- )
530
- progress_bar.progress(1.0, text='Done!')
531
- st.chat_message('ai').code(response, language='json')
532
-
533
- if path := generate_slide_deck(response):
534
- _display_download_button(path)
535
-
536
- logger.info(
537
- '#messages in history / 2: %d',
538
- len(st.session_state[CHAT_MESSAGES]) / 2
539
- )
540
-
541
-
542
- def generate_slide_deck(json_str: str) -> Union[pathlib.Path, None]:
543
- """
544
- Create a slide deck and return the file path. In case there is any error creating the slide
545
- deck, the path may be to an empty file.
546
-
547
- :param json_str: The content in *valid* JSON format.
548
- :return: The path to the .pptx file or `None` in case of error.
549
- """
550
-
551
- try:
552
- parsed_data = json5.loads(json_str)
553
- except ValueError:
554
- handle_error(
555
- 'Encountered error while parsing JSON...will fix it and retry',
556
- True
557
- )
558
- try:
559
- parsed_data = json5.loads(text_helper.fix_malformed_json(json_str))
560
- except ValueError:
561
- handle_error(
562
- 'Encountered an error again while fixing JSON...'
563
- 'the slide deck cannot be created, unfortunately ☹'
564
- '\nPlease try again later.',
565
- True
566
- )
567
- return None
568
- except RecursionError:
569
- handle_error(
570
- 'Encountered a recursion error while parsing JSON...'
571
- 'the slide deck cannot be created, unfortunately ☹'
572
- '\nPlease try again later.',
573
- True
574
- )
575
- return None
576
- except Exception:
577
- handle_error(
578
- 'Encountered an error while parsing JSON...'
579
- 'the slide deck cannot be created, unfortunately ☹'
580
- '\nPlease try again later.',
581
- True
582
- )
583
- return None
584
-
585
- if DOWNLOAD_FILE_KEY in st.session_state:
586
- path = pathlib.Path(st.session_state[DOWNLOAD_FILE_KEY])
587
- else:
588
- temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
589
- path = pathlib.Path(temp.name)
590
- st.session_state[DOWNLOAD_FILE_KEY] = str(path)
591
-
592
- if temp:
593
- temp.close()
594
-
595
- try:
596
- logger.debug('Creating PPTX file: %s...', st.session_state[DOWNLOAD_FILE_KEY])
597
- pptx_helper.generate_powerpoint_presentation(
598
- parsed_data,
599
- slides_template=pptx_template,
600
- output_file_path=path
601
- )
602
- except Exception as ex:
603
- st.error(APP_TEXT['content_generation_error'])
604
- logger.exception('Caught a generic exception: %s', str(ex))
605
-
606
- return path
607
 
608
 
609
  def _is_it_refinement() -> bool:
@@ -643,9 +503,6 @@ def _get_last_response() -> str:
643
  :return: The response text.
644
  """
645
 
646
- return st.session_state[CHAT_MESSAGES][-1].content
647
-
648
-
649
  def _display_messages_history(view_messages: st.expander):
650
  """
651
  Display the history of messages.
 
6
  import os
7
  import pathlib
8
  import random
9
+ import sys
10
  import tempfile
11
  from typing import List, Union
12
 
 
18
  import streamlit as st
19
  from dotenv import load_dotenv
20
 
21
+ sys.path.insert(0, os.path.abspath('src'))
22
+ from slidedeckai.core import SlideDeckAI
23
+ from slidedeckai import global_config as gcfg
24
+ from slidedeckai.global_config import GlobalConfig
25
+ from slidedeckai.helpers import llm_helper, text_helper
26
+ import slidedeckai.helpers.file_manager as filem
27
+ from slidedeckai.helpers.chat_helper import ChatMessage, HumanMessage, AIMessage
28
+ from slidedeckai.helpers import chat_helper
29
 
30
  load_dotenv()
31
 
32
class StreamlitChatMessageHistory:
    """Chat message history backed by Streamlit's session state.

    Messages are stored as a list under ``key`` in ``st.session_state`` so
    they survive Streamlit reruns within the same browser session.
    """

    def __init__(self, key: str):
        """Initialize the history, creating an empty list under *key* if absent.

        :param key: The session-state key under which messages are stored.
        """
        self.key = key
        if key not in st.session_state:
            st.session_state[key] = []

    @property
    def messages(self):
        """Return the list of messages stored in session state."""
        return st.session_state[self.key]

    def add_user_message(self, content: str):
        """Append a human-authored message to the history.

        :param content: The message text.
        """
        self.messages.append(HumanMessage(content))

    def add_ai_message(self, content: str):
        """Append an AI-authored message to the history.

        :param content: The message text.
        """
        self.messages.append(AIMessage(content))
50
  RUN_IN_OFFLINE_MODE = os.getenv('RUN_IN_OFFLINE_MODE', 'False').lower() == 'true'
51
 
52
 
 
332
  st.info(APP_TEXT['like_feedback'])
333
  st.chat_message('ai').write(random.choice(APP_TEXT['ai_greetings']))
334
 
335
+ history = StreamlitChatMessageHistory(key=CHAT_MESSAGES)
336
  prompt_template = chat_helper.ChatPromptTemplate.from_template(
337
  _get_prompt_template(
338
  is_refinement=_is_it_refinement()
 
386
  f' {st.session_state["end_page"]} in {st.session_state["pdf_file"].name}'
387
  )
388
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
389
  st.chat_message('user').write(prompt_text)
390
 
391
+ slide_generator = SlideDeckAI(
392
+ model=llm_provider_to_use,
393
+ topic=prompt_text,
394
+ api_key=api_key_token.strip(),
395
+ template_idx=list(GlobalConfig.PPTX_TEMPLATE_FILES.keys()).index(pptx_template),
396
+ pdf_path_or_stream=st.session_state.get(PDF_FILE_KEY),
397
+ pdf_page_range=(st.session_state.get('start_page'), st.session_state.get('end_page')),
398
+ )
 
 
 
 
 
 
 
 
 
 
 
 
399
 
400
  progress_bar = st.progress(0, 'Preparing to call LLM...')
401
+
402
+ def progress_callback(current_progress):
403
+ progress_bar.progress(min(current_progress / gcfg.get_max_output_tokens(llm_provider_to_use), 0.95), text='Streaming content...this might take a while...')
404
 
405
  try:
406
+ if _is_it_refinement():
407
+ path = slide_generator.revise(instructions=prompt_text, progress_callback=progress_callback)
408
+ else:
409
+ path = slide_generator.generate(progress_callback=progress_callback)
 
 
 
 
 
410
 
411
+ progress_bar.progress(1.0, text='Done!')
412
+
413
+ if path:
414
+ st.session_state[DOWNLOAD_FILE_KEY] = str(path)
415
+ history.add_user_message(prompt_text)
416
+ history.add_ai_message(slide_generator.last_response)
417
+ st.chat_message('ai').code(slide_generator.last_response, language='json')
418
+ _display_download_button(path)
419
+ else:
420
+ handle_error("Failed to generate slide deck.", True)
421
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
422
  except (httpx.ConnectError, requests.exceptions.ConnectionError):
423
  handle_error(
424
  'A connection error occurred while streaming content from the LLM endpoint.'
 
427
  ' using Ollama, make sure that Ollama is already running on your system.',
428
  True
429
  )
 
430
  except huggingface_hub.errors.ValidationError as ve:
431
  handle_error(
432
  f'An error occurred while trying to generate the content: {ve}'
433
  '\nPlease try again with a significantly shorter input text.',
434
  True
435
  )
 
436
  except ollama.ResponseError:
437
  handle_error(
438
+ f'The model is unavailable with Ollama on your system.'
439
+ f' Make sure that you have provided the correct LLM name or pull it.'
440
+ f' View LLMs available locally by running `ollama list`.',
441
  True
442
  )
 
443
  except Exception as ex:
444
  _msg = str(ex)
445
  if 'payment required' in _msg.lower():
 
464
  ' Read **[how to get free LLM API keys](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)**.',
465
  True
466
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
467
 
468
 
469
  def _is_it_refinement() -> bool:
 
503
  :return: The response text.
504
  """
505
 
 
 
 
506
  def _display_messages_history(view_messages: st.expander):
507
  """
508
  Display the history of messages.
pyproject.toml ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [build-system]
2
+ requires = ["setuptools>=77.0.3"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "slidedeckai"
7
+ authors = [
8
+ { name="Barun Saha", email="[email protected]" }
9
+ ]
10
+ description = "A Python package to generate slide decks using AI."
11
+ readme = "README.md"
12
+ requires-python = ">=3.10"
13
+ classifiers = [
14
+ "Programming Language :: Python :: 3",
15
+ "License :: OSI Approved :: MIT License",
16
+ "Operating System :: OS Independent"
17
+ ]
18
+ dynamic = ["dependencies", "version"]
19
+
20
+ [tool.setuptools]
21
+ package-dir = {"" = "src"}
22
+ include-package-data = true
23
+
24
+ [tool.setuptools.packages.find]
25
+ where = ["src"]
26
+
27
+ [tool.setuptools.dynamic]
28
+ dependencies = {file = ["requirements.txt"]}
29
+ version = {attr = "slidedeckai._version.__version__"}
30
+
31
+ [tool.setuptools.package-data]
32
+ slidedeckai = ["prompts/**/*.txt", "strings.json", "pptx_templates/*.pptx", "icons/png128/*.png", "icons/svg_repo.txt", "file_embeddings/*.npy"]
33
+
34
+ [project.urls]
35
+ "Homepage" = "https://github.com/barun-saha/slide-deck-ai"
36
+ "Bug Tracker" = "https://github.com/barun-saha/slide-deck-ai/issues"
37
+
38
+ [project.scripts]
39
+ slidedeckai = "slidedeckai.cli:main"
{helpers β†’ src/slidedeckai}/__init__.py RENAMED
File without changes
src/slidedeckai/_version.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __version__ = "8.0.0"
src/slidedeckai/cli.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Command-line interface for SlideDeckAI.
3
+ """
4
+ import argparse
5
+ import subprocess
6
+ import sys
7
+ from .core import SlideDeckAI
8
+
9
def main():
    """
    Entry point for the ``slidedeckai`` command-line interface.

    Sub-commands:

    * ``generate`` -- create a new slide deck from a topic using an LLM.
    * ``launch`` -- run the bundled Streamlit app via ``streamlit run``.

    Exits with status 1 when no sub-command is given or generation fails.
    """
    parser = argparse.ArgumentParser(description='Generate slide decks with SlideDeckAI.')
    subparsers = parser.add_subparsers(dest='command')

    # 'generate' command
    parser_generate = subparsers.add_parser('generate', help='Generate a new slide deck.')
    parser_generate.add_argument('--model', required=True, help='The name of the LLM model to use.')
    parser_generate.add_argument('--topic', required=True, help='The topic of the slide deck.')
    parser_generate.add_argument('--api-key', help='The API key for the LLM provider.')
    parser_generate.add_argument(
        '--template-id', type=int, default=0,
        help='The index of the PowerPoint template to use.'
    )
    parser_generate.add_argument('--output-path', help='The path to save the generated .pptx file.')

    # 'launch' command
    subparsers.add_parser('launch', help='Launch the Streamlit app.')

    args = parser.parse_args()

    if args.command == 'generate':
        slide_generator = SlideDeckAI(
            model=args.model,
            topic=args.topic,
            api_key=args.api_key,
            template_idx=args.template_id,
        )

        pptx_path = slide_generator.generate()

        if pptx_path is None:
            # Generation can fail (e.g. the LLM output never parsed as JSON);
            # report it instead of crashing on a None path below.
            print('Failed to generate the slide deck.', file=sys.stderr)
            sys.exit(1)

        if args.output_path:
            import shutil
            shutil.move(str(pptx_path), args.output_path)
            print(f"Slide deck saved to {args.output_path}")
        else:
            print(f"Slide deck saved to {pptx_path}")
    elif args.command == 'launch':
        # Get the path to the app.py file.
        # NOTE(review): this resolves app.py relative to the package directory
        # (src/slidedeckai/../../app.py), which only works from a source
        # checkout -- confirm whether app.py ships with the installed package.
        import os
        import slidedeckai
        app_path = os.path.join(os.path.dirname(slidedeckai.__file__), '..', '..', 'app.py')
        subprocess.run([sys.executable, '-m', 'streamlit', 'run', app_path])
    else:
        # No sub-command given: show usage instead of silently doing nothing.
        parser.print_help()
        sys.exit(1)
+
52
+ if __name__ == '__main__':
53
+ main()
src/slidedeckai/core.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Core classes for SlideDeckAI.
3
+ """
4
+ import logging
5
+ import os
6
+ import pathlib
7
+ import tempfile
8
+ from typing import Union
9
+
10
+ import json5
11
+ from dotenv import load_dotenv
12
+
13
+ from . import global_config as gcfg
14
+ from .global_config import GlobalConfig
15
+ from .helpers import llm_helper, pptx_helper, text_helper
16
+ from .helpers.chat_helper import ChatMessageHistory
17
+
18
+ load_dotenv()
19
+
20
+ RUN_IN_OFFLINE_MODE = os.getenv('RUN_IN_OFFLINE_MODE', 'False').lower() == 'true'
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+ from .helpers import file_manager as filem
25
+
26
class SlideDeckAI:
    """
    The main class for generating slide decks.

    Typical usage: construct with a model identifier and a topic, call
    :meth:`generate` to produce a .pptx file, then optionally call
    :meth:`revise` with follow-up instructions.
    """

    def __init__(self, model, topic, api_key=None, pdf_path_or_stream=None,
                 pdf_page_range=None, template_idx=0):
        """
        Initialize the SlideDeckAI object.

        :param model: The name of the LLM model to use.
        :param topic: The topic of the slide deck.
        :param api_key: The API key for the LLM provider.
        :param pdf_path_or_stream: The path to a PDF file or a file-like object.
        :param pdf_page_range: A tuple representing the page range to use from the PDF file.
        :param template_idx: The index of the PowerPoint template to use.
        """
        self.model = model
        self.topic = topic
        self.api_key = api_key
        self.pdf_path_or_stream = pdf_path_or_stream
        self.pdf_page_range = pdf_page_range
        self.template_idx = template_idx
        self.chat_history = ChatMessageHistory()
        self.last_response = None

    def _get_prompt_template(self, is_refinement: bool) -> str:
        """
        Return a prompt template.

        :param is_refinement: Whether this is the initial or refinement prompt.
        :return: The prompt template as f-string.
        """
        template_path = (
            GlobalConfig.REFINEMENT_PROMPT_TEMPLATE if is_refinement
            else GlobalConfig.INITIAL_PROMPT_TEMPLATE
        )
        with open(template_path, 'r', encoding='utf-8') as in_file:
            return in_file.read()

    def _get_additional_info(self) -> str:
        """
        Extract text from the configured PDF source, if any.

        :return: The PDF contents, or an empty string when no PDF is set.
        """
        if self.pdf_path_or_stream:
            return filem.get_pdf_contents(self.pdf_path_or_stream, self.pdf_page_range)
        return ''

    def _stream_llm_response(self, formatted_template, progress_callback=None) -> str:
        """
        Resolve the provider/model, stream the LLM response, and return it.

        :param formatted_template: The fully formatted prompt to send.
        :param progress_callback: Optional callable invoked with the running
            response length after each chunk.
        :return: The accumulated response text.
        """
        provider, llm_name = llm_helper.get_provider_model(
            self.model, use_ollama=RUN_IN_OFFLINE_MODE
        )

        llm = llm_helper.get_litellm_llm(
            provider=provider,
            model=llm_name,
            max_new_tokens=gcfg.get_max_output_tokens(self.model),
            api_key=self.api_key,
        )

        response = ""
        for chunk in llm.stream(formatted_template):
            if isinstance(chunk, str):
                response += chunk
            else:
                # Chunk objects may expose `.content`; fall back to str().
                content = getattr(chunk, 'content', None)
                response += content if content is not None else str(chunk)
            if progress_callback:
                progress_callback(len(response))

        return response

    def generate(self, progress_callback=None):
        """
        Generate the initial slide deck.

        :param progress_callback: Optional callable receiving streaming progress.
        :return: The path to the generated .pptx file, or `None` on error.
        """
        self.chat_history.add_user_message(self.topic)
        prompt_template = self._get_prompt_template(is_refinement=False)
        formatted_template = prompt_template.format(
            question=self.topic,
            additional_info=self._get_additional_info(),
        )

        response = self._stream_llm_response(formatted_template, progress_callback)

        self.last_response = text_helper.get_clean_json(response)
        self.chat_history.add_ai_message(self.last_response)

        return self._generate_slide_deck(self.last_response)

    def revise(self, instructions, progress_callback=None):
        """
        Revise the slide deck with new instructions.

        :param instructions: The instructions for revising the slide deck.
        :param progress_callback: Optional callable receiving streaming progress.
        :return: The path to the revised .pptx file, or `None` on error.
        :raises ValueError: If no deck was generated yet or the history is full.
        """
        if not self.last_response:
            raise ValueError("You must generate a slide deck before you can revise it.")

        # The cap counts both user and AI messages, so this allows ~8 exchanges.
        if len(self.chat_history.messages) >= 16:
            raise ValueError("Chat history is full. Please reset to continue.")

        self.chat_history.add_user_message(instructions)

        prompt_template = self._get_prompt_template(is_refinement=True)

        # Number all user instructions so the LLM sees the full revision history.
        list_of_msgs = [
            f'{idx + 1}. {msg.content}'
            for idx, msg in enumerate(self.chat_history.messages)
            if msg.role == 'user'
        ]

        formatted_template = prompt_template.format(
            instructions='\n'.join(list_of_msgs),
            previous_content=self.last_response,
            additional_info=self._get_additional_info(),
        )

        response = self._stream_llm_response(formatted_template, progress_callback)

        self.last_response = text_helper.get_clean_json(response)
        self.chat_history.add_ai_message(self.last_response)

        return self._generate_slide_deck(self.last_response)

    def _generate_slide_deck(self, json_str: str) -> Union[pathlib.Path, None]:
        """
        Create a slide deck and return the file path.

        :param json_str: The content in *valid* JSON format.
        :return: The path to the .pptx file or `None` in case of error.
        """
        try:
            parsed_data = json5.loads(json_str)
        except (ValueError, RecursionError) as e:
            logger.error("Error parsing JSON: %s", e)
            # The LLM output is often slightly malformed; attempt one repair pass.
            try:
                parsed_data = json5.loads(text_helper.fix_malformed_json(json_str))
            except (ValueError, RecursionError) as e2:
                logger.error("Error parsing fixed JSON: %s", e2)
                return None

        # delete=False so the file survives close() and can be handed to the caller.
        temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
        path = pathlib.Path(temp.name)
        temp.close()

        try:
            pptx_helper.generate_powerpoint_presentation(
                parsed_data,
                slides_template=list(GlobalConfig.PPTX_TEMPLATE_FILES.keys())[self.template_idx],
                output_file_path=path
            )
        except Exception as ex:
            logger.exception('Caught a generic exception: %s', str(ex))
            return None

        return path

    def set_template(self, idx):
        """
        Set the PowerPoint template to use.

        :param idx: The index of the template to use.
        """
        self.template_idx = idx

    def reset(self):
        """
        Reset the chat history and forget the last generated deck.
        """
        self.chat_history = ChatMessageHistory()
        self.last_response = None
{file_embeddings β†’ src/slidedeckai/file_embeddings}/embeddings.npy RENAMED
File without changes
{file_embeddings β†’ src/slidedeckai/file_embeddings}/icons.npy RENAMED
File without changes
global_config.py β†’ src/slidedeckai/global_config.py RENAMED
@@ -4,6 +4,7 @@ A set of configurations used by the app.
4
  import logging
5
  import os
6
  import re
 
7
 
8
  from dataclasses import dataclass
9
  from dotenv import load_dotenv
@@ -11,6 +12,7 @@ from dotenv import load_dotenv
11
 
12
  load_dotenv()
13
 
 
14
 
15
  @dataclass(frozen=True)
16
  class GlobalConfig:
@@ -128,32 +130,32 @@ class GlobalConfig:
128
 
129
  LOG_LEVEL = 'DEBUG'
130
  COUNT_TOKENS = False
131
- APP_STRINGS_FILE = 'strings.json'
132
- PRELOAD_DATA_FILE = 'examples/example_02.json'
133
- INITIAL_PROMPT_TEMPLATE = 'prompts/initial_template_v4_two_cols_img.txt'
134
- REFINEMENT_PROMPT_TEMPLATE = 'prompts/refinement_template_v4_two_cols_img.txt'
135
 
136
  LLM_PROGRESS_MAX = 90
137
- ICONS_DIR = 'icons/png128/'
138
  TINY_BERT_MODEL = 'gaunernst/bert-mini-uncased'
139
- EMBEDDINGS_FILE_NAME = 'file_embeddings/embeddings.npy'
140
- ICONS_FILE_NAME = 'file_embeddings/icons.npy'
141
 
142
  PPTX_TEMPLATE_FILES = {
143
  'Basic': {
144
- 'file': 'pptx_templates/Blank.pptx',
145
  'caption': 'A good start (Uses [photos](https://unsplash.com/photos/AFZ-qBPEceA) by [cetteup](https://unsplash.com/@cetteup?utm_content=creditCopyText&utm_medium=referral&utm_source=unsplash) on [Unsplash](https://unsplash.com/photos/a-foggy-forest-filled-with-lots-of-trees-d3ci37Gcgxg?utm_content=creditCopyText&utm_medium=referral&utm_source=unsplash)) 🟧'
146
  },
147
  'Ion Boardroom': {
148
- 'file': 'pptx_templates/Ion_Boardroom.pptx',
149
  'caption': 'Make some bold decisions πŸŸ₯'
150
  },
151
  'Minimalist Sales Pitch': {
152
- 'file': 'pptx_templates/Minimalist_sales_pitch.pptx',
153
  'caption': 'In high contrast ⬛'
154
  },
155
  'Urban Monochrome': {
156
- 'file': 'pptx_templates/Urban_monochrome.pptx',
157
  'caption': 'Marvel in a monochrome dream ⬜'
158
  },
159
  }
 
4
  import logging
5
  import os
6
  import re
7
+ from pathlib import Path
8
 
9
  from dataclasses import dataclass
10
  from dotenv import load_dotenv
 
12
 
13
  load_dotenv()
14
 
15
+ _SRC_DIR = Path(__file__).resolve().parent
16
 
17
  @dataclass(frozen=True)
18
  class GlobalConfig:
 
130
 
131
  LOG_LEVEL = 'DEBUG'
132
  COUNT_TOKENS = False
133
+ APP_STRINGS_FILE = _SRC_DIR / 'strings.json'
134
+ PRELOAD_DATA_FILE = _SRC_DIR / 'examples/example_02.json'
135
+ INITIAL_PROMPT_TEMPLATE = _SRC_DIR / 'prompts/initial_template_v4_two_cols_img.txt'
136
+ REFINEMENT_PROMPT_TEMPLATE = _SRC_DIR / 'prompts/refinement_template_v4_two_cols_img.txt'
137
 
138
  LLM_PROGRESS_MAX = 90
139
+ ICONS_DIR = _SRC_DIR / 'icons/png128/'
140
  TINY_BERT_MODEL = 'gaunernst/bert-mini-uncased'
141
+ EMBEDDINGS_FILE_NAME = _SRC_DIR / 'file_embeddings/embeddings.npy'
142
+ ICONS_FILE_NAME = _SRC_DIR / 'file_embeddings/icons.npy'
143
 
144
  PPTX_TEMPLATE_FILES = {
145
  'Basic': {
146
+ 'file': _SRC_DIR / 'pptx_templates/Blank.pptx',
147
  'caption': 'A good start (Uses [photos](https://unsplash.com/photos/AFZ-qBPEceA) by [cetteup](https://unsplash.com/@cetteup?utm_content=creditCopyText&utm_medium=referral&utm_source=unsplash) on [Unsplash](https://unsplash.com/photos/a-foggy-forest-filled-with-lots-of-trees-d3ci37Gcgxg?utm_content=creditCopyText&utm_medium=referral&utm_source=unsplash)) 🟧'
148
  },
149
  'Ion Boardroom': {
150
+ 'file': _SRC_DIR / 'pptx_templates/Ion_Boardroom.pptx',
151
  'caption': 'Make some bold decisions πŸŸ₯'
152
  },
153
  'Minimalist Sales Pitch': {
154
+ 'file': _SRC_DIR / 'pptx_templates/Minimalist_sales_pitch.pptx',
155
  'caption': 'In high contrast ⬛'
156
  },
157
  'Urban Monochrome': {
158
+ 'file': _SRC_DIR / 'pptx_templates/Urban_monochrome.pptx',
159
  'caption': 'Marvel in a monochrome dream ⬜'
160
  },
161
  }
src/slidedeckai/helpers/__init__.py ADDED
File without changes
{helpers β†’ src/slidedeckai/helpers}/chat_helper.py RENAMED
@@ -27,24 +27,17 @@ class AIMessage(ChatMessage):
27
  super().__init__(content, 'ai')
28
 
29
 
30
- class StreamlitChatMessageHistory:
31
- """Chat message history stored in Streamlit session state."""
32
 
33
- def __init__(self, key: str):
34
- self.key = key
35
- if key not in st.session_state:
36
- st.session_state[key] = []
37
-
38
- @property
39
- def messages(self):
40
- return st.session_state[self.key]
41
 
42
  def add_user_message(self, content: str):
43
- st.session_state[self.key].append(HumanMessage(content))
44
 
45
  def add_ai_message(self, content: str):
46
- st.session_state[self.key].append(AIMessage(content))
47
-
48
 
49
  class ChatPromptTemplate:
50
  """Template for chat prompts."""
 
27
  super().__init__(content, 'ai')
28
 
29
 
30
+ class ChatMessageHistory:
31
+ """Chat message history stored in a list."""
32
 
33
+ def __init__(self):
34
+ self.messages = []
 
 
 
 
 
 
35
 
36
  def add_user_message(self, content: str):
37
+ self.messages.append(HumanMessage(content))
38
 
39
  def add_ai_message(self, content: str):
40
+ self.messages.append(AIMessage(content))
 
41
 
42
  class ChatPromptTemplate:
43
  """Template for chat prompts."""
{helpers β†’ src/slidedeckai/helpers}/file_manager.py RENAMED
@@ -8,10 +8,7 @@ import sys
8
  import streamlit as st
9
  from pypdf import PdfReader
10
 
11
- sys.path.append('..')
12
- sys.path.append('../..')
13
-
14
- from global_config import GlobalConfig
15
 
16
 
17
  logger = logging.getLogger(__name__)
 
8
  import streamlit as st
9
  from pypdf import PdfReader
10
 
11
+ from ..global_config import GlobalConfig
 
 
 
12
 
13
 
14
  logger = logging.getLogger(__name__)
{helpers β†’ src/slidedeckai/helpers}/icons_embeddings.py RENAMED
@@ -11,10 +11,7 @@ import numpy as np
11
  from sklearn.metrics.pairwise import cosine_similarity
12
  from transformers import BertTokenizer, BertModel
13
 
14
- sys.path.append('..')
15
- sys.path.append('../..')
16
-
17
- from global_config import GlobalConfig
18
 
19
 
20
  tokenizer = BertTokenizer.from_pretrained(GlobalConfig.TINY_BERT_MODEL)
@@ -28,9 +25,9 @@ def get_icons_list() -> List[str]:
28
  :return: The icons file names.
29
  """
30
 
31
- items = pathlib.Path('../' + GlobalConfig.ICONS_DIR).glob('*.png')
32
  items = [
33
- os.path.basename(str(item)).removesuffix('.png') for item in items
34
  ]
35
 
36
  return items
 
11
  from sklearn.metrics.pairwise import cosine_similarity
12
  from transformers import BertTokenizer, BertModel
13
 
14
+ from ..global_config import GlobalConfig
 
 
 
15
 
16
 
17
  tokenizer = BertTokenizer.from_pretrained(GlobalConfig.TINY_BERT_MODEL)
 
25
  :return: The icons file names.
26
  """
27
 
28
+ items = GlobalConfig.ICONS_DIR.glob('*.png')
29
  items = [
30
+ item.stem for item in items
31
  ]
32
 
33
  return items
{helpers β†’ src/slidedeckai/helpers}/image_search.py RENAMED
File without changes
{helpers β†’ src/slidedeckai/helpers}/llm_helper.py RENAMED
@@ -8,9 +8,7 @@ import urllib3
8
  from typing import Tuple, Union, Iterator, Optional
9
 
10
 
11
- sys.path.append('..')
12
-
13
- from global_config import GlobalConfig
14
 
15
  try:
16
  import litellm
 
8
  from typing import Tuple, Union, Iterator, Optional
9
 
10
 
11
+ from ..global_config import GlobalConfig
 
 
12
 
13
  try:
14
  import litellm
{helpers β†’ src/slidedeckai/helpers}/pptx_helper.py RENAMED
@@ -16,12 +16,9 @@ from dotenv import load_dotenv
16
  from pptx.enum.shapes import MSO_AUTO_SHAPE_TYPE
17
  from pptx.shapes.placeholder import PicturePlaceholder, SlidePlaceholder
18
 
19
- sys.path.append('..')
20
- sys.path.append('../..')
21
-
22
- import helpers.icons_embeddings as ice
23
- import helpers.image_search as ims
24
- from global_config import GlobalConfig
25
 
26
 
27
  load_dotenv()
 
16
  from pptx.enum.shapes import MSO_AUTO_SHAPE_TYPE
17
  from pptx.shapes.placeholder import PicturePlaceholder, SlidePlaceholder
18
 
19
+ from . import icons_embeddings as ice
20
+ from . import image_search as ims
21
+ from ..global_config import GlobalConfig
 
 
 
22
 
23
 
24
  load_dotenv()
{helpers β†’ src/slidedeckai/helpers}/text_helper.py RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/0-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/1-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/123.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/2-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/3-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/4-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/5-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/6-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/7-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/8-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/9-circle.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/activity.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/airplane.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/alarm.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/alien-head.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/alphabet.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/amazon.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/amritsar-golden-temple.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/amsterdam-canal.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/amsterdam-windmill.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/android.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/angkor-wat.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/apple.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/archive.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/argentina-obelisk.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/artificial-intelligence-brain.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/atlanta.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/austin.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/automation-decision.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/award.png RENAMED
File without changes
{icons β†’ src/slidedeckai/icons}/png128/balloon.png RENAMED
File without changes