import streamlit as st
from PIL import Image
import matplotlib.pyplot as plt
import networkx as nx
import json
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
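# Release any cached GPU memory at startup (a no-op when CUDA is unavailable)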
torch.cuda.empty_cache()
#import openai
import os
import numpy as np
# Ensure models and datasets are available
from download_models import download_all
# Run only if critical file is missing
if not os.path.exists("P&ID-Symbols-3/train/_annotations.coco.json"):
    with st.spinner("Downloading required files (models & datasets)..."):
        download_all()
from pipeline.detector import detect_symbols_and_lines
from pipeline.graph_builder import build_graph
from pipeline.gnn_model import run_gnn
from pipeline.agent import generate_agent_actions
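# Pipeline stages: detect_symbols_and_lines locates P&ID symbols and lines in the image,
# build_graph converts the detections into a graph, run_gnn assigns anomaly scores to the
# nodes (simulated for now), and generate_agent_actions proposes follow-up actions.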
st.set_page_config(layout="wide")
st.title(" Agentic Predictive Maintenance (P&ID Graph + GNN)")
# Initialize session state variables
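# These values persist across Streamlit reruns so the Q&A section below can reuse
# the graph and scores produced by the detection step.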
if "G" not in st.session_state:
st.session_state.G = None
if "feature_map" not in st.session_state:
st.session_state.feature_map = {}
if "scores" not in st.session_state:
st.session_state.scores = {}
# === User can choose from downloaded dataset OR upload their own ===
st.subheader("Upload or Select a P&ID Drawing")
local_dataset_dir = "P&ID-Symbols-3/P&ID-Symbols-3/test"
image_files = []
if os.path.exists(local_dataset_dir):
    image_files = [f for f in os.listdir(local_dataset_dir) if f.lower().endswith((".png", ".jpg", ".jpeg"))]
else:
    st.warning(f"Dataset folder not found: {local_dataset_dir}. Please run download_models.py to download it.")
selected_image = st.selectbox("Select a sample from P&ID-Symbols-3:", ["-- Select an example --"] + image_files)
uploaded_file = st.file_uploader("...Or upload your own P&ID image", type=["png", "jpg", "jpeg"])
image = None
image_source = ""
if selected_image and selected_image != "-- Select an example --":
    image_path = os.path.join(local_dataset_dir, selected_image)
    image = Image.open(image_path)
    image_source = f"Sample from dataset: {selected_image}"
elif uploaded_file:
    image = Image.open(uploaded_file)
    image_source = f"Uploaded: {uploaded_file.name}"
if image:
    st.image(image, caption=image_source, use_column_width=True)
    if st.button("Run Detection and Analysis"):
        # Detect symbols and connecting lines, then build the connectivity graph
        detections, annotations, class_names = detect_symbols_and_lines(image)
        graph = build_graph(image, detections, annotations, class_names)
        st.info("Running anomaly detection on the graph (simulated for now)...")
        fig, feature_map, red_nodes, central_node, scores, G = run_gnn()
        # Persist results so the Q&A section below can reuse them on later reruns
        st.session_state.G = G
        st.session_state.feature_map = feature_map
        st.session_state.scores = scores
        st.pyplot(fig)
        actions = generate_agent_actions(fig, feature_map, red_nodes, central_node, scores)
        for action in actions:
            st.write(action)
# === DeepSeek Local Model Setup ===
@st.cache_resource
def load_deepseek_model():
model_name = "deepseek-ai/deepseek-coder-1.3b-instruct" # Lightweight version
# model_name = "deepseek-ai/deepseek-llm-7b" # Larger but more capable
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
'''model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float16,
device_map="auto",
trust_remote_code=True'''
model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float16,
device_map="cpu",
#load_in_4bit=True, # 4-bit quantization
trust_remote_code=True
)
return model, tokenizer
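# Because of @st.cache_resource, the first question triggers the model download/load;
# subsequent questions reuse the cached model and tokenizer.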
# === Q&A Interface ===
st.subheader(" Ask Questions About the Graph (DeepSeek Local)")
user_query = st.chat_input("Ask a question about the graph...")
if user_query:
    G = st.session_state.get("G")
    feature_map = st.session_state.get("feature_map", {})
    scores = st.session_state.get("scores", [])
    if G is not None and feature_map and len(scores) > 0:
        graph_data = {
            "nodes": [
                {
                    "id": str(i),
                    "label": feature_map[i] if i < len(feature_map) else f"Node {i}",
                    "score": float(scores[i]) if i < len(scores) else 0.0
                }
                for i in G.nodes()
            ],
            "edges": [
                {"source": str(u), "target": str(v)}
                for u, v in G.edges()
            ]
        }
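        # The graph is serialized to JSON (node labels, anomaly scores, edges) and
        # embedded directly in the prompt so the language model can reason over it.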
        prompt = (
            "You are an expert graph analyst. Analyze this P&ID graph and answer the question.\n\n"
            "### Graph Data:\n"
            f"{json.dumps(graph_data, indent=2)}\n\n"
            "### Question:\n"
            f"{user_query}\n\n"
            "### Answer:\n"
        )
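        # The trailing "### Answer:" header cues the instruct model to continue with its answer.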
        try:
            with st.spinner("Thinking (via DeepSeek Local)..."):
                # Load model (cached after first run)
                model, tokenizer = load_deepseek_model()
                # Generate response
                inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
                outputs = model.generate(
                    **inputs,
                    max_new_tokens=128,
                    temperature=0.7,
                    do_sample=True
                )
                # Decode only the newly generated tokens so the prompt is not echoed back
                new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
                answer = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
                st.markdown(f"**DeepSeek:** {answer}")
        except Exception as e:
            st.error(f"DeepSeek error: {e}")
            st.error("Make sure you have enough GPU memory (8GB+ recommended for the 7B model)")
    else:
        st.warning("Graph or scores are not ready yet.")