Update app.py
app.py
CHANGED
Old version of the changed regions (lines removed by this commit are prefixed with -):

@@ -15,20 +15,17 @@ from urllib.parse import quote
 import streamlit as st
 import streamlit.components.v1 as components
 
-#
-# (You can omit if you're not using HF or adapt your own client)
 from huggingface_hub import InferenceClient
 
 # ----------------------------
 # Placeholder data structures
 # ----------------------------
-
-# Example placeholders for prompt prefixes
 PromptPrefix = "AI-Search: "
 PromptPrefix2 = "AI-Refine: "
 PromptPrefix3 = "AI-JS: "
 
-# Minimal example of a roleplaying glossary
 roleplaying_glossary = {
     "Core Rulebooks": {
         "Dungeons and Dragons": ["Player's Handbook", "Dungeon Master's Guide", "Monster Manual"],
@@ -39,13 +36,15 @@ roleplaying_glossary = {
     }
 }
 
-# Minimal example of a transhuman glossary
 transhuman_glossary = {
     "Neural Interfaces": ["Cortex Jack", "Mind-Machine Fusion"],
     "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
 }
 
-
 def process_text(text):
     st.write(f"process_text called with: {text}")
 
@@ -67,7 +66,8 @@ def process_video(video_file, seconds_per_frame):
 def search_glossary(content):
     st.write(f"search_glossary called with: {content}")
 
-
     API_URL = "https://huggingface-inference-endpoint-placeholder"
     API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
 
@@ -82,8 +82,7 @@ def InferenceLLM(prompt):
 @st.cache_resource
 def display_glossary_entity(k):
     """
-
-    This was in your original snippet. We'll keep it short.
     """
     search_urls = {
         "🚀🌌ArXiv": lambda k: f"/?q={quote(k)}",
@@ -151,8 +150,7 @@ def get_zip_download_link(zip_file):
     with open(zip_file, 'rb') as f:
         data = f.read()
     b64 = base64.b64encode(data).decode()
-
-    return href
 
 def get_table_download_link(file_path):
     """
@@ -174,8 +172,7 @@ def get_table_download_link(file_path):
             '.wav': 'audio/wav'
         }
         mime_type = mime_map.get(ext, 'application/octet-stream')
-
-        return href
     except:
         return ''
 
@@ -193,7 +190,7 @@ def compare_and_delete_files(files):
     for file in files:
         size = os.path.getsize(file)
         file_sizes.setdefault(size, []).append(file)
-    # Remove all but the latest file for each size
    for size, paths in file_sizes.items():
        if len(paths) > 1:
            latest_file = max(paths, key=os.path.getmtime)
@@ -201,24 +198,24 @@ def compare_and_delete_files(files):
                 if file != latest_file:
                     os.remove(file)
                     st.success(f"Deleted {file} as a duplicate.")
-
 
 def FileSidebar():
     """
     Renders the file sidebar with all the open/view/run/delete logic.
     """
     all_files = glob.glob("*.md")
-    #
-    all_files = [
     all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)
 
-    #
     Files1, Files2 = st.sidebar.columns(2)
     with Files1:
         if st.button("🗑 Delete All"):
             for file in all_files:
                 os.remove(file)
-            st.
     with Files2:
         if st.button("⬇️ Download"):
             zip_file = create_zip_of_files(all_files)
@@ -228,11 +225,9 @@ def FileSidebar():
     file_name = ''
     next_action = ''
 
-    # Each file row
     for file in all_files:
         col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1])
         with col1:
-            # Show an emoji button to do "md"
             if st.button("🌐", key="md_"+file):
                 file_contents = load_file(file)
                 file_name = file
@@ -258,12 +253,9 @@ def FileSidebar():
         with col5:
             if st.button("🗑", key="delete_"+file):
                 os.remove(file)
-
-                st.rerun()
-                next_action = 'delete'
-                st.session_state['next_action'] = next_action
 
-    #
     file_sizes = [get_file_size(file) for file in all_files]
     previous_size = None
     st.sidebar.title("File Operations")
@@ -277,28 +269,26 @@ def FileSidebar():
                 file_content = f.read()
             st.code(file_content, language="markdown")
         except UnicodeDecodeError:
-            st.error("Failed to decode
         if st.button("Delete", key=f"delete3_{file}"):
             os.remove(file)
-            st.
         previous_size = size
 
-    # If we
-    if
     if next_action == 'open':
         open1, open2 = st.columns([0.8, 0.2])
         with open1:
-            file_name_input = st.text_input('File Name:', file_name, key='file_name_input'
             file_content_area = st.text_area('File Contents:', file_contents, height=300, key='file_content_area')
 
-            # Minimal “Save” stubs
             if st.button('💾 Save File'):
                 with open(file_name_input, 'w', encoding='utf-8') as f:
                     f.write(file_content_area)
                 st.markdown(f'Saved {file_name_input} successfully.')
 
     elif next_action == 'search':
-        # Example usage
         file_content_area = st.text_area("File Contents:", file_contents, height=500)
         user_prompt = PromptPrefix2 + file_contents
         st.markdown(user_prompt)
@@ -312,9 +302,9 @@ def FileSidebar():
         st.write("Running GPT logic placeholder...")
 
 
-#
 # Basic Scoring / Glossaries
-#
 score_dir = "scores"
 os.makedirs(score_dir, exist_ok=True)
 
@@ -354,7 +344,6 @@ def display_buttons_with_scores(num_columns_text):
         "Kindred of the East": "🌅",
         "Changeling": "🍃",
     }
-
     topic_emojis = {
         "Core Rulebooks": "📚",
         "Maps & Settings": "🗺️",
@@ -382,19 +371,18 @@ def display_buttons_with_scores(num_columns_text):
                 st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")
 
 
-#
 # Image & Video Grids
-#
 def display_images_and_wikipedia_summaries(num_columns=4):
     """
-    Display
     """
     image_files = [f for f in os.listdir('.') if f.endswith('.png')]
     if not image_files:
         st.write("No PNG images found in the current directory.")
         return
 
-    # Sort by length of filename, just as an example
     image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0]))
     cols = st.columns(num_columns)
     col_index = 0
@@ -408,7 +396,7 @@ def display_images_and_wikipedia_summaries(num_columns=4):
             display_glossary_entity(k)
             # Provide a text input for user interactions
             image_text_input = st.text_input(f"Prompt for {image_file}", key=f"image_prompt_{image_file}")
-            if
                 response = process_image(image_file, image_text_input)
                 st.markdown(response)
         except:
@@ -417,7 +405,7 @@ def display_images_and_wikipedia_summaries(num_columns=4):
 
 def display_videos_and_links(num_columns=4):
     """
-    Displays all .mp4
     """
     video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
     if not video_files:
@@ -433,11 +421,10 @@ def display_videos_and_links(num_columns=4):
             k = video_file.split('.')[0]
             st.video(video_file, format='video/mp4', start_time=0)
             display_glossary_entity(k)
-            # Provide a text input
             video_text_input = st.text_input(f"Video Prompt for {video_file}", key=f"video_prompt_{video_file}")
             if video_text_input:
                 try:
-                    #
                     seconds_per_frame = 10
                     process_video(video_file, seconds_per_frame)
                 except ValueError:
@@ -445,19 +432,25 @@ def display_videos_and_links(num_columns=4):
         col_index += 1
 
 
-#
-# Query Param Helpers
-#
-def get_all_query_params(key):
-    return st.query_params().get(key, [])
-
 def clear_query_params():
-
 
 def display_content_or_image(query):
     """
-    If a query matches
-    a local image, show it. Otherwise warn no match.
     """
     for category, term_list in transhuman_glossary.items():
         for term in term_list:
@@ -478,8 +471,7 @@ def display_content_or_image(query):
 # ------------------------------------
 def generate_mermaid_html(mermaid_code: str) -> str:
     """
-
-    in <div class="mermaid"> and center it with CSS.
     """
     return f"""
     <html>
@@ -492,7 +484,6 @@ def generate_mermaid_html(mermaid_code: str) -> str:
         margin: 20px auto;
       }}
       .mermaid {{
-        /* Let the diagram scale or otherwise style as you wish */
        max-width: 800px;
      }}
    </style>
@@ -510,8 +501,7 @@ def generate_mermaid_html(mermaid_code: str) -> str:
 
 def append_model_param(url: str, model_selected: bool) -> str:
     """
-    If '
-    We'll handle whether the URL already has a '?' or not.
     """
     if not model_selected:
         return url
@@ -519,10 +509,8 @@ def append_model_param(url: str, model_selected: bool) -> str:
     return f"{url}{delimiter}model=1"
 
 
-# Default Mermaid diagram with "click" lines
 DEFAULT_MERMAID = """
 flowchart LR
-    %% Notice we have "click LLM ..." lines:
     U((User 😎)) -- "Talk 🗣️" --> LLM[LLM Agent 🤖\\nExtract Info]
     click U "/?q=User%20😎" _self
     click LLM "/?q=LLM%20Agent%20Extract%20Info" _self
@@ -537,28 +525,62 @@ flowchart LR
     click KG "/?q=Knowledge%20Graph%20Ontology+GAR+RAG" _self
 """
 
 # ---------------------------
-# Streamlit
 # ---------------------------
 def main():
     st.set_page_config(page_title="Mermaid + Clickable Links Demo", layout="wide")
 
-    #
-
-
-
-
-
     st.sidebar.write("## Diagram Link Settings")
     model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")
 
-    #
     base_diagram = DEFAULT_MERMAID
     lines = base_diagram.strip().split("\n")
     new_lines = []
     for line in lines:
         if "click " in line and '"/?' in line:
-            # e.g. click
             parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+("_self")', line)
             if len(parts) == 4:
                 url = parts[1]
@@ -569,22 +591,18 @@ def main():
                 new_lines.append(line)
         else:
             new_lines.append(line)
-
     mermaid_code = "\n".join(new_lines)
 
-    #
     st.title("Top-Centered Mermaid Diagram with Clickable Links 🏺")
     diagram_html = generate_mermaid_html(mermaid_code)
     components.html(diagram_html, height=400, scrolling=True)
 
-    #
-
-
-    display_content_or_image(current_q)
-    if current_r:
-        st.markdown(f"**Detected Relationship**: `?r={current_r}`")
-
-    # 6) Editor Columns: Markdown & Mermaid
     left_col, right_col = st.columns(2)
 
     # --- Left: Markdown Editor
@@ -592,7 +610,6 @@ def main():
         st.subheader("Markdown Side 📝")
         if "markdown_text" not in st.session_state:
             st.session_state["markdown_text"] = "## Hello!\nType some *Markdown* here.\n"
-        # Text area
         markdown_text = st.text_area(
             "Edit Markdown:",
             value=st.session_state["markdown_text"],
@@ -600,7 +617,6 @@ def main():
         )
         st.session_state["markdown_text"] = markdown_text
 
-        # Button row
         colA, colB = st.columns(2)
         with colA:
             if st.button("🔄 Refresh Markdown"):
@@ -608,9 +624,8 @@ def main():
         with colB:
             if st.button("❌ Clear Markdown"):
                 st.session_state["markdown_text"] = ""
-                st.
 
-        # Show the rendered Markdown below
         st.markdown("---")
         st.markdown("**Preview:**")
         st.markdown(markdown_text)
@@ -628,23 +643,24 @@ def main():
             height=300
         )
 
-        # A small button bar
         colC, colD = st.columns(2)
         with colC:
             if st.button("🎨 Refresh Diagram"):
                 st.session_state["current_mermaid"] = mermaid_input
                 st.write("**Mermaid** diagram refreshed! 🌈")
-                st.
         with colD:
             if st.button("❌ Clear Mermaid"):
                 st.session_state["current_mermaid"] = ""
-                st.
 
         st.markdown("---")
         st.markdown("**Mermaid Source:**")
         st.code(mermaid_input, language="python", line_numbers=True)
 
-    #
     st.markdown("---")
     st.header("Media Galleries")
 
@@ -654,7 +670,9 @@ def main():
     num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
     display_videos_and_links(num_columns_video)
 
-    #
     showExtendedTextInterface = False
     if showExtendedTextInterface:
         display_glossary_grid(roleplaying_glossary)
@@ -662,10 +680,14 @@ def main():
         display_buttons_with_scores(num_columns_text)
         st.markdown("Extended text interface is on...")
 
-    #
     FileSidebar()
 
-    #
     titles = [
         "🧠🎭 Semantic Symphonies & Episodic Encores",
         "🌌🎼 AI Rhythms of Memory Lane",
New version of the changed regions (lines added by this commit are prefixed with +; numbers are line numbers in the updated app.py):

 15   import streamlit as st
 16   import streamlit.components.v1 as components
 17
 18 + # (Optional) If you use huggingface_hub:
 19   from huggingface_hub import InferenceClient
 20
 21 +
 22   # ----------------------------
 23   # Placeholder data structures
 24   # ----------------------------
 25   PromptPrefix = "AI-Search: "
 26   PromptPrefix2 = "AI-Refine: "
 27   PromptPrefix3 = "AI-JS: "
 28
 29   roleplaying_glossary = {
 30       "Core Rulebooks": {
 31           "Dungeons and Dragons": ["Player's Handbook", "Dungeon Master's Guide", "Monster Manual"],

 36       }
 37   }
 38
 39   transhuman_glossary = {
 40       "Neural Interfaces": ["Cortex Jack", "Mind-Machine Fusion"],
 41       "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
 42   }
 43
 44 +
 45 + # ------------------------------------------
 46 + # Example stubs for placeholders
 47 + # ------------------------------------------
 48   def process_text(text):
 49       st.write(f"process_text called with: {text}")
 50

 66   def search_glossary(content):
 67       st.write(f"search_glossary called with: {content}")
 68
 69 +
 70 +     # If you have HF Inference endpoints, placeholders here
 71       API_URL = "https://huggingface-inference-endpoint-placeholder"
 72       API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
 73

 82   @st.cache_resource
 83   def display_glossary_entity(k):
 84       """
 85 +     Creates multiple links (emojis) for a single entity.
 86       """
 87       search_urls = {
 88           "🚀🌌ArXiv": lambda k: f"/?q={quote(k)}",

150       with open(zip_file, 'rb') as f:
151           data = f.read()
152       b64 = base64.b64encode(data).decode()
153 +     return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
154
155   def get_table_download_link(file_path):
156       """

172               '.wav': 'audio/wav'
173           }
174           mime_type = mime_map.get(ext, 'application/octet-stream')
175 +         return f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
176       except:
177           return ''
178
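Both download helpers above now return an HTML anchor whose href embeds the file bytes as a base64 data: URI. A minimal, self-contained sketch of the same pattern (the helper name and the st.markdown(..., unsafe_allow_html=True) call are my own illustration; the diff itself only shows the return statements):

import base64
import streamlit as st

def file_download_link(path: str, mime: str = "application/octet-stream") -> str:
    # Read the file, base64-encode it, and wrap it in a data: URI anchor.
    with open(path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    return f'<a href="data:{mime};base64,{b64}" download="{path}">{path}</a>'

# Typical usage in a Streamlit app (assumption, not shown in the diff):
# st.markdown(file_download_link("notes.md", "text/markdown"), unsafe_allow_html=True)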
190       for file in files:
191           size = os.path.getsize(file)
192           file_sizes.setdefault(size, []).append(file)
193 +     # Remove all but the latest file for each size group
194       for size, paths in file_sizes.items():
195           if len(paths) > 1:
196               latest_file = max(paths, key=os.path.getmtime)

198                   if file != latest_file:
199                       os.remove(file)
200                       st.success(f"Deleted {file} as a duplicate.")
201 +
202
203   def FileSidebar():
204       """
205       Renders the file sidebar with all the open/view/run/delete logic.
206       """
207       all_files = glob.glob("*.md")
208 +     # Filter out short-named or undesired files, if needed:
209 +     all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
210       all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)
211
212 +     # "Delete All" and "Download" buttons
213       Files1, Files2 = st.sidebar.columns(2)
214       with Files1:
215           if st.button("🗑 Delete All"):
216               for file in all_files:
217                   os.remove(file)
218 +             st.experimental_rerun()  # or remove if you prefer no rerun
219       with Files2:
220           if st.button("⬇️ Download"):
221               zip_file = create_zip_of_files(all_files)

225       file_name = ''
226       next_action = ''
227
228       for file in all_files:
229           col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1])
230           with col1:
231               if st.button("🌐", key="md_"+file):
232                   file_contents = load_file(file)
233                   file_name = file

253           with col5:
254               if st.button("🗑", key="delete_"+file):
255                   os.remove(file)
256 +                 st.experimental_rerun()  # or remove if no rerun needed
257
258 +     # Optional: show file sizes
259       file_sizes = [get_file_size(file) for file in all_files]
260       previous_size = None
261       st.sidebar.title("File Operations")
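The sidebar code above calls st.experimental_rerun(), while the editor buttons later in main() call st.rerun(). If the app has to run on both older and newer Streamlit releases, a small compatibility wrapper is one way to reconcile the two; a sketch under that assumption (the helper name is mine, not part of the commit):

import streamlit as st

def safe_rerun():
    # st.rerun() replaced st.experimental_rerun() in newer Streamlit releases;
    # fall back to the older name when the new one is not available.
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()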
269                   file_content = f.read()
270               st.code(file_content, language="markdown")
271           except UnicodeDecodeError:
272 +             st.error("Failed to decode file with UTF-8.")
273           if st.button("Delete", key=f"delete3_{file}"):
274               os.remove(file)
275 +             st.experimental_rerun()
276           previous_size = size
277
278 +     # If we've loaded content from a file
279 +     if file_contents:
280           if next_action == 'open':
281               open1, open2 = st.columns([0.8, 0.2])
282               with open1:
283 +                 file_name_input = st.text_input('File Name:', file_name, key='file_name_input')
284                   file_content_area = st.text_area('File Contents:', file_contents, height=300, key='file_content_area')
285
286                   if st.button('💾 Save File'):
287                       with open(file_name_input, 'w', encoding='utf-8') as f:
288                           f.write(file_content_area)
289                       st.markdown(f'Saved {file_name_input} successfully.')
290
291           elif next_action == 'search':
292               file_content_area = st.text_area("File Contents:", file_contents, height=500)
293               user_prompt = PromptPrefix2 + file_contents
294               st.markdown(user_prompt)

302           st.write("Running GPT logic placeholder...")
303
304
305 + # -------------------------------------------
306   # Basic Scoring / Glossaries
307 + # -------------------------------------------
308   score_dir = "scores"
309   os.makedirs(score_dir, exist_ok=True)
310

344           "Kindred of the East": "🌅",
345           "Changeling": "🍃",
346       }
347       topic_emojis = {
348           "Core Rulebooks": "📚",
349           "Maps & Settings": "🗺️",

371                   st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")
372
373
374 + # --------------------------------------
375   # Image & Video Grids
376 + # --------------------------------------
377   def display_images_and_wikipedia_summaries(num_columns=4):
378       """
379 +     Display .png images in a grid with text input prompts.
380       """
381       image_files = [f for f in os.listdir('.') if f.endswith('.png')]
382       if not image_files:
383           st.write("No PNG images found in the current directory.")
384           return
385
386       image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0]))
387       cols = st.columns(num_columns)
388       col_index = 0

396               display_glossary_entity(k)
397               # Provide a text input for user interactions
398               image_text_input = st.text_input(f"Prompt for {image_file}", key=f"image_prompt_{image_file}")
399 +             if image_text_input:
400                   response = process_image(image_file, image_text_input)
401                   st.markdown(response)
402           except:

405
406   def display_videos_and_links(num_columns=4):
407       """
408 +     Displays all .mp4/.webm videos in the directory in a grid, with text input prompts.
409       """
410       video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
411       if not video_files:

421               k = video_file.split('.')[0]
422               st.video(video_file, format='video/mp4', start_time=0)
423               display_glossary_entity(k)
424               video_text_input = st.text_input(f"Video Prompt for {video_file}", key=f"video_prompt_{video_file}")
425               if video_text_input:
426                   try:
427 +                     # For demonstration
428                       seconds_per_frame = 10
429                       process_video(video_file, seconds_per_frame)
430                   except ValueError:

432           col_index += 1
433
434
435 + # --------------------------------------
436 + # Query Param Helpers (No experimental)
437 + # --------------------------------------
438   def clear_query_params():
439 +     """
440 +     In Streamlit, there's no direct method to "clear" query params
441 +     without rewriting the URL. One workaround is to do:
442 +
443 +         st.experimental_set_query_params()  # with no arguments
444 +
445 +     But if you want to avoid *all* experimental calls,
446 +     you can provide a link or button that leads to a new URL without params.
447 +     """
448 +     st.warning("Use a redirect or link that excludes query parameters.")
449 +
450
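The clear_query_params docstring above points to st.experimental_set_query_params() as a workaround. On recent Streamlit releases, st.query_params behaves like a mutable mapping, so clearing it directly should also rewrite the URL without any experimental_* call; a sketch assuming such a release is installed (helper name is mine):

import streamlit as st

def clear_query_params_inplace():
    # Removing every key from st.query_params updates the browser URL
    # so that no query parameters remain.
    st.query_params.clear()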
451   def display_content_or_image(query):
452       """
453 +     If a query matches transhuman_glossary or a local image, display it.
454       """
455       for category, term_list in transhuman_glossary.items():
456           for term in term_list:

471   # ------------------------------------
472   def generate_mermaid_html(mermaid_code: str) -> str:
473       """
474 +     Embeds a mermaid diagram in HTML, centered.
475       """
476       return f"""
477       <html>

484           margin: 20px auto;
485         }}
486         .mermaid {{
487           max-width: 800px;
488         }}
489       </style>

501
502   def append_model_param(url: str, model_selected: bool) -> str:
503       """
504 +     If 'model=1' is desired, we append it to each URL in the diagram.
505       """
506       if not model_selected:
507           return url

509       return f"{url}{delimiter}model=1"
510
511
512   DEFAULT_MERMAID = """
513   flowchart LR
514       U((User 😎)) -- "Talk 🗣️" --> LLM[LLM Agent 🤖\\nExtract Info]
515       click U "/?q=User%20😎" _self
516       click LLM "/?q=LLM%20Agent%20Extract%20Info" _self

525       click KG "/?q=Knowledge%20Graph%20Ontology+GAR+RAG" _self
526   """
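For reference, the delimiter logic of append_model_param shown above sits outside the changed lines (line 508 is not part of this diff). Assuming that line picks '&' when the URL already carries a query string and '?' otherwise, the function would behave like this sketch:

def append_model_param(url: str, model_selected: bool) -> str:
    # Sketch only: the delimiter line is an assumption, since the real
    # line 508 is outside the hunks shown above.
    if not model_selected:
        return url
    delimiter = "&" if "?" in url else "?"
    return f"{url}{delimiter}model=1"

assert append_model_param("/?q=User", True) == "/?q=User&model=1"
assert append_model_param("/about", True) == "/about?model=1"
assert append_model_param("/about", False) == "/about"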
527
528 +
529   # ---------------------------
530 + # Main Streamlit App
531   # ---------------------------
532   def main():
533       st.set_page_config(page_title="Mermaid + Clickable Links Demo", layout="wide")
534
535 +     # ---------------------------------------------
536 +     # Query Parameter Parsing (No experimental usage)
537 +     # ---------------------------------------------
538 +     try:
539 +         query_params = st.query_params
540 +
541 +         # Look for 'q' or 'query'
542 +         query_list = (query_params.get('q') or query_params.get('query') or [''])
543 +         q_or_query = query_list[0] if len(query_list) > 0 else ''
544 +         if q_or_query.strip():
545 +             filesearch = PromptPrefix + q_or_query
546 +             st.markdown(filesearch)
547 +             process_text(filesearch)
548 +
549 +     except Exception as e:
550 +         st.markdown(" ")  # do nothing if there's an error
551 +
552 +     # If 'action' in st.query_params
553 +     if 'action' in st.query_params:
554 +         action_list = st.query_params['action']
555 +         if action_list:
556 +             action = action_list[0]
557 +             if action == 'show_message':
558 +                 st.success("Showing a message because 'action=show_message' was found in the URL.")
559 +             elif action == 'clear':
560 +                 clear_query_params()
561 +                 # If you wanted a full rerun with no params, you'd do a redirect or
562 +                 # st.experimental_set_query_params() with no arguments (but that's experimental).
563 +
564 +     # If 'query' param is present, show content or image
565 +     if 'query' in st.query_params:
566 +         query_list2 = st.query_params['query']
567 +         if query_list2 and len(query_list2) > 0:
568 +             query_val = query_list2[0]
569 +             display_content_or_image(query_val)
570 +
571 +     # ---------------------------------------------
572 +     # Let user pick if they want to add ?model=1
573 +     # ---------------------------------------------
574       st.sidebar.write("## Diagram Link Settings")
575       model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")
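One caveat about the query-parameter block above: on current Streamlit releases st.query_params.get() returns a plain string (with .get_all() for repeated keys), while the older st.experimental_get_query_params() returned lists, so indexing the result with [0] can yield the first character of a string rather than the first list element. A hedged, version-agnostic reader (the helper is my own, not part of the commit):

import streamlit as st

def get_query_value(key: str, default: str = "") -> str:
    # Accept either a list-style result (old API) or a string (new API).
    raw = st.query_params.get(key, default)
    if isinstance(raw, (list, tuple)):
        return raw[0] if raw else default
    return raw if raw is not None else default

# q_or_query = get_query_value("q") or get_query_value("query")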
576
577 +     # Rebuild the dynamic Mermaid code
578       base_diagram = DEFAULT_MERMAID
579       lines = base_diagram.strip().split("\n")
580       new_lines = []
581       for line in lines:
582           if "click " in line and '"/?' in line:
583 +             # e.g. click U "/?q=User" _self
584               parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+("_self")', line)
585               if len(parts) == 4:
586                   url = parts[1]

591                       new_lines.append(line)
592           else:
593               new_lines.append(line)
594       mermaid_code = "\n".join(new_lines)
595
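A note on the re.split call above: because captured groups are returned in the result list, a click line whose target is written as "_self" (in quotes) splits into exactly four parts with the URL at index 1. The click lines in DEFAULT_MERMAID use a bare _self, which this pattern does not match, so the ?model=1 rewrite may silently leave them unchanged. A quick standalone check of both cases (the sample strings are my own):

import re

pattern = r'click\s+\S+\s+"([^"]+)"\s+("_self")'

quoted = '    click U "/?q=User%20X" "_self"'
parts = re.split(pattern, quoted)
assert len(parts) == 4 and parts[1] == "/?q=User%20X"

bare = '    click U "/?q=User%20X" _self'   # as written in DEFAULT_MERMAID
assert len(re.split(pattern, bare)) == 1    # no match, the line passes through unchanged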
+
# ---------------------------------------------
|
| 597 |
+
# Render top-centered Mermaid diagram
|
| 598 |
+
# ---------------------------------------------
|
| 599 |
st.title("Top-Centered Mermaid Diagram with Clickable Links 🏺")
|
| 600 |
diagram_html = generate_mermaid_html(mermaid_code)
|
| 601 |
components.html(diagram_html, height=400, scrolling=True)
|
| 602 |
|
| 603 |
+
# ---------------------------------------------
|
| 604 |
+
# Two-column layout: Markdown & Mermaid Editors
|
| 605 |
+
# ---------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 606 |
left_col, right_col = st.columns(2)
|
| 607 |
|
| 608 |
# --- Left: Markdown Editor
|
|
|
|
| 610 |
st.subheader("Markdown Side 📝")
|
| 611 |
if "markdown_text" not in st.session_state:
|
| 612 |
st.session_state["markdown_text"] = "## Hello!\nType some *Markdown* here.\n"
|
|
|
|
| 613 |
markdown_text = st.text_area(
|
| 614 |
"Edit Markdown:",
|
| 615 |
value=st.session_state["markdown_text"],
|
|
|
|
| 617 |
)
|
| 618 |
st.session_state["markdown_text"] = markdown_text
|
| 619 |
|
|
|
|
| 620 |
colA, colB = st.columns(2)
|
| 621 |
with colA:
|
| 622 |
if st.button("🔄 Refresh Markdown"):
|
|
|
|
| 624 |
with colB:
|
| 625 |
if st.button("❌ Clear Markdown"):
|
| 626 |
st.session_state["markdown_text"] = ""
|
| 627 |
+
st.rerun() # non-experimental re-run if available in your Streamlit version
|
| 628 |
|
|
|
|
| 629 |
st.markdown("---")
|
| 630 |
st.markdown("**Preview:**")
|
| 631 |
st.markdown(markdown_text)
|
|
|
|
| 643 |
height=300
|
| 644 |
)
|
| 645 |
|
|
|
|
| 646 |
colC, colD = st.columns(2)
|
| 647 |
with colC:
|
| 648 |
if st.button("🎨 Refresh Diagram"):
|
| 649 |
st.session_state["current_mermaid"] = mermaid_input
|
| 650 |
st.write("**Mermaid** diagram refreshed! 🌈")
|
| 651 |
+
st.rerun()
|
| 652 |
with colD:
|
| 653 |
if st.button("❌ Clear Mermaid"):
|
| 654 |
st.session_state["current_mermaid"] = ""
|
| 655 |
+
st.rerun()
|
| 656 |
|
| 657 |
st.markdown("---")
|
| 658 |
st.markdown("**Mermaid Source:**")
|
| 659 |
st.code(mermaid_input, language="python", line_numbers=True)
|
| 660 |
|
| 661 |
+
# ---------------------------------------------
|
| 662 |
+
# Media Galleries
|
| 663 |
+
# ---------------------------------------------
|
| 664 |
st.markdown("---")
|
| 665 |
st.header("Media Galleries")
|
| 666 |
|
|
|
|
| 670 |
num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
|
| 671 |
display_videos_and_links(num_columns_video)
|
| 672 |
|
| 673 |
+
# ---------------------------------------------
|
| 674 |
+
# Optional Extended UI
|
| 675 |
+
# ---------------------------------------------
|
| 676 |
showExtendedTextInterface = False
|
| 677 |
if showExtendedTextInterface:
|
| 678 |
display_glossary_grid(roleplaying_glossary)
|
|
|
|
| 680 |
display_buttons_with_scores(num_columns_text)
|
| 681 |
st.markdown("Extended text interface is on...")
|
| 682 |
|
| 683 |
+
# ---------------------------------------------
|
| 684 |
+
# File Sidebar
|
| 685 |
+
# ---------------------------------------------
|
| 686 |
FileSidebar()
|
| 687 |
|
| 688 |
+
# ---------------------------------------------
|
| 689 |
+
# Random Title at the bottom
|
| 690 |
+
# ---------------------------------------------
|
| 691 |
titles = [
|
| 692 |
"🧠🎭 Semantic Symphonies & Episodic Encores",
|
| 693 |
"🌌🎼 AI Rhythms of Memory Lane",
|