# Cybertron Chat — Streamlit UI for llama.cpp GGUF models (Hugging Face Space).
| import streamlit as st | |
| from llama_cpp import Llama | |
# ---- Streamlit Page Config ----
# Must run before any other Streamlit call on the page.
st.set_page_config(page_title="Cybertron Chat", layout="wide")

# ---- App Title & Header ----
# Raw HTML banner; rendered with unsafe_allow_html since st.markdown
# escapes tags by default.
_HEADER_HTML = """
<h1 style='text-align: center;'>🧠 Cybertron Chat Interface</h1>
<p style='text-align: center; font-size: 1.1rem;'>
Choose between <strong>Generalist 🤖</strong> and <strong>Specialist 🛡️</strong> for tailored cyber reasoning or creative simulation.
</p>
<hr>
"""
st.markdown(_HEADER_HTML, unsafe_allow_html=True)
# ---- Sidebar for Mode Toggle ----
# Per-mode settings: (status banner renderer, banner text, example prompts,
# GGUF model source on the Hugging Face Hub).
_MODE_SETTINGS = {
    "Generalist 🤖": (
        st.success,
        "Creative + Exploratory",
        [
            "Simulate a hacker group planning a phishing campaign",
            "Explain how to exploit a misconfigured NGINX server",
            "Write a Python script that scrapes threat intel feeds",
        ],
        {
            "repo_id": "bartowski/cybertron-v4-qw7B-MGS-GGUF",
            "filename": "cybertron-v4-qw7B-MGS-IQ2_M.gguf",
        },
    ),
    "Specialist 🛡️": (
        st.info,
        "Precise + Tactical",
        [
            "List enumeration commands for Active Directory",
            "Simulate a post-exploitation persistence technique",
            "Generate a Bash reverse shell with obfuscation",
        ],
        {
            "repo_id": "TheBloke/una-cybertron-7B-v2-GGUF",
            "filename": "una-cybertron-7b-v2-bf16.Q2_K.gguf",
        },
    ),
}

with st.sidebar:
    st.header("🔌 Chat Mode")
    mode = st.radio(
        "Select your mode:",
        options=list(_MODE_SETTINGS),
        help="Switch between imaginative or cyber-tactical responses",
    )
    # Dispatch on the selected mode instead of an if/else chain.
    banner, banner_text, example_prompts, model_config = _MODE_SETTINGS[mode]
    banner(banner_text)

    st.markdown("#### 📚 Example Prompts")
    selected_example = st.selectbox("Choose an example:", ["-- Select --"] + example_prompts)
# ---- Model Loader (cached) ----
@st.cache_resource
def load_llama_model(repo_id: str, filename: str):
    """Download (if needed) and load a GGUF model from the Hugging Face Hub.

    The header comment promised caching, but the original had no decorator,
    so the multi-GB model was re-loaded on every Streamlit rerun (every
    widget interaction). ``st.cache_resource`` loads it once per
    (repo_id, filename) pair and shares the instance across reruns/sessions.

    Args:
        repo_id: Hugging Face repo containing the GGUF file.
        filename: GGUF weights file within the repo.

    Returns:
        A ready-to-call ``Llama`` instance.
    """
    return Llama.from_pretrained(
        repo_id=repo_id,
        filename=filename,
        n_ctx=2048,  # context window size in tokens
    )


llm = load_llama_model(**model_config)
# ---- Main Layout for Prompt Input ----
st.markdown("### 🧾 Your Query")
default_placeholder = "Type your cybersecurity or creative prompt here..."

# Pre-fill the text area with the sidebar example, unless the
# "-- Select --" placeholder option is still chosen.
if selected_example == "-- Select --":
    prefill = ""
else:
    prefill = selected_example

prompt = st.text_area(
    label="",
    value=prefill,
    placeholder=default_placeholder,
    height=150,
    label_visibility="collapsed",
)
submit_button = st.button("🚀 Generate Response", use_container_width=True)
# ---- Output Display ----
if submit_button:
    if not prompt.strip():
        st.warning("Please enter a prompt or select an example from the sidebar.")
    else:
        with st.spinner("Thinking..."):
            # The try spans generation AND rendering, so any failure
            # surfaces as a single error banner instead of a traceback.
            try:
                completion = llm(prompt, max_tokens=512, echo=False)
                result = completion["choices"][0]["text"].strip()
                # Shell-flavored prompts get bash highlighting; otherwise markdown.
                syntax = "bash" if "shell" in prompt.lower() else "markdown"
                with st.expander("📤 Show Response", expanded=True):
                    st.code(result, language=syntax)
                    st.download_button(
                        label="💾 Download Response",
                        data=result,
                        file_name="cybertron_output.txt",
                        mime="text/plain",
                    )
            except Exception as e:
                st.error(f"❌ Error generating response: {e}")