Update app.py

app.py CHANGED
@@ -167,8 +167,7 @@ def generate_filename(prompt, response, file_type="md"):
     Generate filename with meaningful terms and short dense clips from prompt & response.
     The filename should be about 150 chars total, include high-info terms, and a clipped snippet.
     """
-
-    prefix = f"{timestamp}_"
+    prefix = datetime.now().strftime("%y%m_%H%M") + "_"
     combined = (prompt + " " + response).strip()
     info_terms = get_high_info_terms(combined, top_n=10)
 
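Note on the hunk above: the prefix now embeds a compact timestamp directly instead of relying on a separate timestamp variable. A standalone sketch of what the new prefix evaluates to (not part of the diff; output depends on when it runs):

    from datetime import datetime

    # Same strftime format as the new line above.
    prefix = datetime.now().strftime("%y%m_%H%M") + "_"
    print(prefix)  # e.g. "2501_0930_"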
@@ -194,24 +193,6 @@ def create_file(prompt, response, file_type="md"):
         f.write(prompt + "\n\n" + response)
     return filename
 
-def append_to_transcript(entry: dict):
-    """Append a new entry at the top of transcript.md"""
-    transcript_file = "transcript.md"
-    new_entry = f"1. **Input:** {entry['input']}\n\n **Output:** {entry['output']}\n\n **Files:**\n"
-    for file in entry['files']:
-        emoji = FILE_EMOJIS.get(file.split('.')[-1], '')
-        new_entry += f" - {emoji} [{os.path.basename(file)}]({file})\n"
-    new_entry += "\n---\n\n"
-
-    if os.path.exists(transcript_file):
-        with open(transcript_file, 'r', encoding='utf-8') as f:
-            existing_content = f.read()
-    else:
-        existing_content = ""
-
-    with open(transcript_file, 'w', encoding='utf-8') as f:
-        f.write(new_entry + existing_content)
-
 def get_download_link(file, file_type="zip"):
     """Generate download link for file"""
     with open(file, "rb") as f:
@@ -231,7 +212,6 @@ def get_download_link(file, file_type="zip"):
 def clean_for_speech(text: str) -> str:
     """Clean text for speech synthesis"""
     text = text.replace("\n", " ")
-    text = text.replace("\r", " ")
     text = text.replace("</s>", " ")
     text = text.replace("#", "")
     text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
@@ -259,7 +239,7 @@ async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=
     rate_str = f"{rate:+d}%"
     pitch_str = f"{pitch:+d}Hz"
     communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
-    out_fn = generate_filename(text, text,
+    out_fn = generate_filename(text, text, file_type=file_format)
     await communicate.save(out_fn)
     return out_fn
 
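Since edge_tts_generate_audio above is an async coroutine, a caller needs an event loop. A minimal usage sketch, assuming the edge-tts package is installed and the coroutine is imported from app.py (in the app itself it is presumably driven by speak_with_edge_tts):

    import asyncio

    # Hypothetical standalone call; the returned path comes from generate_filename(...).
    audio_path = asyncio.run(edge_tts_generate_audio("Hello from the arXiv reader", voice="en-US-AriaNeural"))
    print(audio_path)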
@@ -337,16 +317,9 @@ def process_video_with_gpt(video_path, prompt):
 
 # 🤖 9. AI Model Integration
 
-def save_full_transcript(query, text, files):
+def save_full_transcript(query, text):
     """Save full transcript of Arxiv results as a file."""
-
-    sanitized_query = query.replace('\r', ' ').replace('\n', ' ').strip()
-    entry = {
-        'input': sanitized_query,
-        'output': text.replace('\r', ' ').replace('\n', ' ').strip(),
-        'files': files
-    }
-    append_to_transcript(entry)
+    create_file(query, text, "md")
 
 def parse_arxiv_refs(ref_text: str):
     """
@@ -415,7 +388,6 @@ def create_paper_audio_files(papers, input_question):
     """
     # Collect all content for combined summary
     combined_titles = []
-    created_files = []
 
     for paper in papers:
         try:
@@ -426,7 +398,6 @@ def create_paper_audio_files(papers, input_question):
             file_format = st.session_state['audio_format']
             full_file = speak_with_edge_tts(full_text, voice=st.session_state['tts_voice'], file_format=file_format)
             paper['full_audio'] = full_file
-            created_files.append(full_file)
 
             # Display the audio immediately after generation
             st.write(f"### {FILE_EMOJIS.get(file_format, '')} {os.path.basename(full_file)}")
@@ -446,9 +417,6 @@ def create_paper_audio_files(papers, input_question):
         st.write(f"### {FILE_EMOJIS.get(file_format, '')} Combined Summary Audio")
         play_and_download_audio(combined_file, file_type=file_format)
         papers.append({'title': 'Combined Summary', 'full_audio': combined_file})
-        created_files.append(combined_file)
-
-    return created_files
 
 def display_papers(papers):
     """
@@ -492,9 +460,8 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
 
     # Parse and process papers
     papers = parse_arxiv_refs(refs)
-    created_files = []
     if papers:
-
+        create_paper_audio_files(papers, input_question=q)
         display_papers(papers)
     else:
         st.warning("No papers found in the response.")
@@ -503,9 +470,7 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
     st.write(f"**Total Elapsed:** {elapsed:.2f} s")
 
     # Save full transcript
-
-    created_files.append(create_md_output)
-    save_full_transcript(q, result, created_files)
+    create_file(q, result, "md")
     return result
 
 def process_with_gpt(text):
@@ -523,9 +488,7 @@ def process_with_gpt(text):
     )
     ans = c.choices[0].message.content
     st.write("GPT-4o: " + ans)
-
-    created_files = [create_md_output]
-    save_full_transcript(text, ans, created_files)
+    create_file(text, ans, "md")
     st.session_state.messages.append({"role":"assistant","content":ans})
     return ans
 
@@ -543,19 +506,16 @@ def process_with_claude(text):
     )
     ans = r.content[0].text
     st.write("Claude-3.5: " + ans)
-
-    created_files = [create_md_output]
-    save_full_transcript(text, ans, created_files)
+    create_file(text, ans, "md")
     st.session_state.chat_history.append({"user":text,"claude":ans})
     return ans
 
 # 📂 10. File Management
 def create_zip_of_files(md_files, mp3_files, wav_files, input_question):
     """Create zip with intelligent naming based on top 10 common words."""
-    # Exclude 'transcript.md'
-    md_files = [f for f in md_files if os.path.basename(f).lower() != 'transcript.md']
+    # Exclude 'readme.md'
+    md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
     all_files = md_files + mp3_files + wav_files
-
     if not all_files:
         return None
 
@@ -577,7 +537,7 @@ def create_zip_of_files(md_files, mp3_files, wav_files, input_question):
     combined_content = " ".join(all_content)
     info_terms = get_high_info_terms(combined_content, top_n=10)
 
-    timestamp = datetime.now().strftime("%y%
+    timestamp = datetime.now().strftime("%y%m_%H%M")
     name_text = '_'.join(term.replace(' ', '-') for term in info_terms[:10])
     zip_name = f"{timestamp}_{name_text}.zip"
 
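The zip name is now built from the shorter timestamp plus up to ten high-information terms. A sketch of the resulting name, with placeholder terms standing in for real get_high_info_terms output:

    from datetime import datetime

    info_terms = ["arxiv", "audio summary", "claude"]   # placeholder terms, not real output
    timestamp = datetime.now().strftime("%y%m_%H%M")
    name_text = '_'.join(term.replace(' ', '-') for term in info_terms[:10])
    zip_name = f"{timestamp}_{name_text}.zip"           # e.g. "2501_0930_arxiv_audio-summary_claude.zip"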
@@ -587,134 +547,93 @@ def create_zip_of_files(md_files, mp3_files, wav_files, input_question):
 
     return zip_name
 
-def
-    """Load and
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+def load_files_for_sidebar():
+    """Load and group files for sidebar display"""
+    md_files = glob.glob("*.md")
+    mp3_files = glob.glob("*.mp3")
+    wav_files = glob.glob("*.wav")  # 🆕 Load WAV files
+
+    md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
+    all_files = md_files + mp3_files + wav_files
+
+    groups = defaultdict(list)
+    for f in all_files:
+        # Treat underscores as spaces and split into words
+        words = os.path.basename(f).replace('_', ' ').split()
+        # Extract keywords from filename
+        keywords = get_high_info_terms(' '.join(words), top_n=5)
+        group_name = '_'.join(keywords) if keywords else 'Miscellaneous'
+        groups[group_name].append(f)
+
+    # Sort groups based on latest file modification time
+    sorted_groups = sorted(groups.items(), key=lambda x: max(os.path.getmtime(f) for f in x[1]), reverse=True)
+    return sorted_groups
+
+def extract_keywords_from_md(files):
+    """Extract keywords from markdown files"""
+    text = ""
+    for f in files:
+        if f.endswith(".md"):
+            c = open(f,'r',encoding='utf-8').read()
+            text += " " + c
+    return get_high_info_terms(text, top_n=5)
+
+def display_file_manager_sidebar(groups_sorted):
     """Display file manager in sidebar"""
     st.sidebar.title("🎵 Audio & Docs Manager")
 
-
-
+    all_md = []
+    all_mp3 = []
+    all_wav = []  # 🆕 List to hold WAV files
+    for group_name, files in groups_sorted:
+        for f in files:
+            if f.endswith(".md"):
+                all_md.append(f)
+            elif f.endswith(".mp3"):
+                all_mp3.append(f)
+            elif f.endswith(".wav"):
+                all_wav.append(f)  # 🆕 Append WAV files
+
     top_bar = st.sidebar.columns(4)  # 🆕 Adjusted columns to accommodate WAV
     with top_bar[0]:
         if st.button("🗑 DelAllMD"):
-
-            md_files = [f for f in md_files if os.path.basename(f).lower() != 'transcript.md']
-            for f in md_files:
+            for f in all_md:
                 os.remove(f)
             st.session_state.should_rerun = True
     with top_bar[1]:
         if st.button("🗑 DelAllMP3"):
-
-            for f in mp3_files:
+            for f in all_mp3:
                 os.remove(f)
             st.session_state.should_rerun = True
     with top_bar[2]:
         if st.button("🗑 DelAllWAV"):
-
-            for f in wav_files:
+            for f in all_wav:
                 os.remove(f)
             st.session_state.should_rerun = True
     with top_bar[3]:
        if st.button("⬇️ ZipAll"):
-
-            md_files = [f for f in md_files if os.path.basename(f).lower() != 'transcript.md']
-            mp3_files = glob.glob("*.mp3")
-            wav_files = glob.glob("*.wav")  # 🆕 Load WAV files
-            zip_name = create_zip_of_files(md_files, mp3_files, wav_files, input_question=st.session_state.get('last_query', ''))
+            zip_name = create_zip_of_files(all_md, all_mp3, all_wav, input_question=st.session_state.get('last_query', ''))
             if zip_name:
                 st.sidebar.markdown(get_download_link(zip_name, file_type="zip"), unsafe_allow_html=True)
 
-    for
-
-        with st.sidebar.expander(f"{FILE_EMOJIS.get('md', '')} {group_name}", expanded=
+    for group_name, files in groups_sorted:
+        keywords_str = group_name.replace('_', ' ') if group_name else "No Keywords"
+        with st.sidebar.expander(f"{FILE_EMOJIS.get('md', '')} {group_name} Files ({len(files)}) - KW: {keywords_str}", expanded=True):
             c1,c2 = st.columns(2)
             with c1:
-                if st.button("👀ViewGrp", key="view_group_"+
-                    st.session_state.viewing_prefix =
+                if st.button("👀ViewGrp", key="view_group_"+group_name):
+                    st.session_state.viewing_prefix = group_name
             with c2:
-                if st.button("🗑DelGrp", key="del_group_"+
-
-
-                        if os.path.exists(f):
-                            os.remove(f)
-                    # Reload transcript without this entry
-                    all_entries = load_transcript()
-                    del all_entries[idx-1]
-                    # Rewrite transcript.md
-                    transcript_file = "transcript.md"
-                    with open(transcript_file, 'w', encoding='utf-8') as tf:
-                        for i, e in enumerate(all_entries, 1):
-                            new_entry = f"1. **Input:** {e['input']}\n\n **Output:** {e['output']}\n\n **Files:**\n"
-                            for file in e['files']:
-                                emoji = FILE_EMOJIS.get(file.split('.')[-1], '')
-                                new_entry += f" - {emoji} [{os.path.basename(file)}]({file})\n"
-                            new_entry += "\n---\n\n"
-                            tf.write(new_entry)
+                if st.button("🗑DelGrp", key="del_group_"+group_name):
+                    for f in files:
+                        os.remove(f)
                     st.success(f"Deleted group {group_name}!")
                     st.session_state.should_rerun = True
 
-        for f in
+        for f in files:
             fname = os.path.basename(f)
             ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
-            st.
-
-def extract_keywords_from_md(files):
-    """Extract keywords from markdown files"""
-    text = ""
-    for f in files:
-        if f.endswith(".md"):
-            c = open(f,'r',encoding='utf-8').read()
-            text += " " + c
-    return get_high_info_terms(text, top_n=5)
-
-def display_viewing_group(group_idx):
-    """Display the contents of a viewing group"""
-    groups_sorted = load_transcript()
-    if group_idx < 1 or group_idx > len(groups_sorted):
-        st.error("Invalid group selected.")
-        return
-    entry = groups_sorted[group_idx-1]
-    st.write("---")
-    st.write(f"**Viewing Group {group_idx}:** {entry['input']}")
-    for f in entry['files']:
-        fname = os.path.basename(f)
-        ext = os.path.splitext(fname)[1].lower().strip('.')
-        st.write(f"### {fname}")
-        if ext == "md":
-            content = open(f,'r',encoding='utf-8').read()
-            st.markdown(content)
-        elif ext == "mp3":
-            st.audio(f)
-        elif ext == "wav":
-            st.audio(f)  # 🆕 Handle WAV files
-        else:
-            st.markdown(get_download_link(f), unsafe_allow_html=True)
-        if st.button("❌ Close"):
-            st.session_state.viewing_prefix = None
+            st.write(f"**{fname}** - {ctime}")
 
 # 🎯 11. Main Application
 def main():
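The new load_files_for_sidebar replaces the transcript-based grouping: files are bucketed by keywords pulled from their own names, then groups are ordered by the newest file they contain. A self-contained sketch of that idea, using a stand-in keyword extractor since get_high_info_terms is defined elsewhere in app.py:

    import glob, os
    from collections import defaultdict

    def extract_terms(text, top_n=5):
        # Stand-in for get_high_info_terms: keep the longest distinct words as crude "keywords".
        words = sorted(set(text.lower().split()), key=len, reverse=True)
        return words[:top_n]

    def group_files(patterns=("*.md", "*.mp3", "*.wav")):
        files = [f for p in patterns for f in glob.glob(p)]
        groups = defaultdict(list)
        for f in files:
            words = os.path.basename(f).replace('_', ' ').split()
            keywords = extract_terms(' '.join(words))
            group_name = '_'.join(keywords) if keywords else 'Miscellaneous'
            groups[group_name].append(f)
        # Newest group first, judged by the most recently modified file it contains.
        return sorted(groups.items(), key=lambda kv: max(os.path.getmtime(f) for f in kv[1]), reverse=True)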
@@ -807,8 +726,7 @@ def main():
             result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
                                        titles_summary=titles_summary, full_audio=full_audio)
             if full_transcript:
-
-                pass
+                save_full_transcript(q, result)
 
         st.markdown("### Change Prompt & Re-Run")
         q_new = st.text_input("🔄 Modify Query:")
@@ -817,8 +735,7 @@ def main():
             result = perform_ai_lookup(q_new, vocal_summary=vocal_summary, extended_refs=extended_refs,
                                        titles_summary=titles_summary, full_audio=full_audio)
             if full_transcript:
-
-                pass
+                save_full_transcript(q_new, result)
 
     elif tab_main == "🎤 Voice":
         st.subheader("🎤 Voice Input")
@@ -878,16 +795,34 @@ def main():
         st.write("Select a file from the sidebar to edit.")
 
     # Load and display files in the sidebar
-
+    groups_sorted = load_files_for_sidebar()
+    display_file_manager_sidebar(groups_sorted)
+
+    if st.session_state.viewing_prefix and any(st.session_state.viewing_prefix == group for group, _ in groups_sorted):
+        st.write("---")
+        st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
+        for group_name, files in groups_sorted:
+            if group_name == st.session_state.viewing_prefix:
+                for f in files:
+                    fname = os.path.basename(f)
+                    ext = os.path.splitext(fname)[1].lower().strip('.')
+                    st.write(f"### {fname}")
+                    if ext == "md":
+                        content = open(f,'r',encoding='utf-8').read()
+                        st.markdown(content)
+                    elif ext == "mp3":
+                        st.audio(f)
+                    elif ext == "wav":
+                        st.audio(f)  # 🆕 Handle WAV files
+                    else:
+                        st.markdown(get_download_link(f), unsafe_allow_html=True)
+                break
+        if st.button("❌ Close"):
+            st.session_state.viewing_prefix = None
 
-    # Display viewing group if selected
-    if st.session_state.viewing_prefix:
-        display_viewing_group(st.session_state.viewing_prefix)
-
-    # Additional Markdown in sidebar
     markdownPapers = """
 
-# Levels of AGI
+# Levels of AGI
 
 ## 1. Performance (rows) x Generality (columns)
 - **Narrow**
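The viewing logic added to main() above dispatches on file extension instead of going through the removed display_viewing_group helper. A minimal sketch of that per-file rendering, with render_file as a hypothetical name:

    import os
    import streamlit as st

    def render_file(f):
        # Mirrors the branch added in main(): markdown inline, audio players for mp3/wav.
        ext = os.path.splitext(f)[1].lower().strip('.')
        st.write(f"### {os.path.basename(f)}")
        if ext == "md":
            st.markdown(open(f, 'r', encoding='utf-8').read())
        elif ext in ("mp3", "wav"):
            st.audio(f)
        else:
            st.write(f)  # the app itself emits a download link here via get_download_link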
@@ -962,7 +897,7 @@ def main():
   - *Reference:* Stockfish (2023). **Stockfish Chess Engine**. [Website](https://stockfishchess.org)
 - **Artificial Superintelligence (ASI)**
   - Not yet achieved
-
+
 
 # 🧬 Innovative Architecture of AlphaFold2: A Hybrid System
 
|
|
| 1009 |
|
| 1010 |
"""
|
| 1011 |
st.sidebar.markdown(markdownPapers)
|
| 1012 |
-
|
| 1013 |
if st.session_state.should_rerun:
|
| 1014 |
st.session_state.should_rerun = False
|
| 1015 |
st.rerun()
|
|