monitkorn committed on
Commit f47e188 · 1 Parent(s): 0a3c132

first commit

Files changed (3)
  1. README.md +27 -4
  2. app.py +247 -0
  3. requirements.txt +8 -0
README.md CHANGED
@@ -1,14 +1,37 @@
  ---
  title: DocuCite Agent
- emoji: 👀
- colorFrom: green
- colorTo: purple
  sdk: gradio
  sdk_version: 5.33.1
  app_file: app.py
  pinned: false
  license: mit
- short_description: Retrieve context with citation
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Demo:
+ https://github.com/user-attachments/assets/24056190-9413-4bb3-9918-fcd739b690a5
+
+ Team Members
+
+ Monit KORN
+ Setthika SUN
+ <a name="mcp-server-track"></a>
+
+ DocuCite-AI
+
+ This repository contains a Gradio-based agentic Retrieval-Augmented Generation (RAG) application built with LangChain and LangGraph. It answers questions about the PDF you upload, automatically retrieving and citing the page and paragraph where the supporting information was found (a minimal usage sketch follows the feature list).
+
+ Features
+
+ 1. PDF Upload & Embedding: Upload any PDF; its contents are automatically chunked, cleaned, and embedded into a vector store (Chroma).
+
+ 2. Agentic Retrieval: Uses LangGraph to orchestrate an agentic workflow that decides when to call a retrieval tool or generate a direct answer.
+
+ 3. Citation-Aware Tool: A custom document_search tool returns results prefixed with [Page X, Paragraph Y].
+
+ 4. Interactive UI: Built with Gradio for easy deployment and use.
+
+
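+ The core function can also be called outside the UI. The sketch below is only illustrative: the function name and signature come from app.py in this repository, while the PDF path and the exact citation text are placeholders.
+
+ ```python
+ from app import gradio_agentic_rag
+
+ # pdf_file may be passed as a path string; history starts empty on the first turn.
+ answer, history = gradio_agentic_rag("paper.pdf", "What is LoRA?", history=None)
+ print(answer)  # answers cite sources in the form [Page X, Paragraph Y]
+ ```
+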
  ---
  title: DocuCite Agent
+ emoji: 👁
+ colorFrom: pink
+ colorTo: red
  sdk: gradio
  sdk_version: 5.33.1
  app_file: app.py
  pinned: false
  license: mit

  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,247 @@
+ from __future__ import annotations
+
+ import hashlib
+ import os
+ import pathlib
+ import tempfile
+ from pathlib import Path
+
+ import gradio as gr
+ import numpy as np
+ import pdfplumber
+ import torch
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import Chroma
+ from langchain.docstore.document import Document
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langchain_openai import ChatOpenAI
+ from langgraph.graph import MessagesState, StateGraph
+
+
+ # Embedding model (BGE-M3); embeddings are normalized so cosine similarity behaves well.
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ EMBEDDER = HuggingFaceEmbeddings(
+     model_name="BAAI/bge-m3",
+     encode_kwargs={"normalize_embeddings": True, "device": device},
+ )
+
+
+ # Chat model served through Nebius AI Studio's OpenAI-compatible endpoint.
+ # The API key is read from the environment (NEBIUS_API_KEY is an assumed
+ # variable name; do not hard-code secrets in the source).
+ LLM = ChatOpenAI(
+     openai_api_key=os.getenv("NEBIUS_API_KEY", ""),
+     openai_api_base="https://api.studio.nebius.com/v1",
+     model="Qwen/Qwen2.5-72B-Instruct",
+ )
+
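+ # Gradio may hand the upload over either as a file-like object or as a path to a
+ # temporary file; normalize both cases to (bytes, filename).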
+ def get_file_bytes_and_name(pdf_file):
+     print("DEBUG: pdf_file type:", type(pdf_file))
+     print("DEBUG: pdf_file dir:", dir(pdf_file))
+     print("DEBUG: pdf_file repr:", repr(pdf_file))
+
+     if hasattr(pdf_file, "read"):
+         return pdf_file.read(), Path(pdf_file.name).name
+     if isinstance(pdf_file, str):
+         file_path = Path(pdf_file)
+         with open(file_path, "rb") as f:
+             return f.read(), file_path.name
+     raise ValueError("Could not extract file bytes from uploaded file.")
+
+
+ VECTOR_ROOT = pathlib.Path.home() / ".rag_vectors"
+ VECTOR_ROOT.mkdir(exist_ok=True)
+
+ # ────────────── 3. PDF-to-vectorstore, clean and tag paragraphs ──────────────
+ def load_or_create_chroma(pdf_bytes: bytes, filename: str) -> Chroma:
+     """
+     Load the persistent Chroma vectorstore for this PDF, or create it if not found.
+     Each chunk carries page and paragraph info.
+     """
+     print(f"\n[INFO] Checking vectorstore for file: {filename}")
+     h = hashlib.md5(pdf_bytes).hexdigest()
+     vect_dir = VECTOR_ROOT / h
+     if (vect_dir / "chroma.sqlite3").exists():
+         print(f"[INFO] Found existing vectorstore: {vect_dir}")
+         return Chroma(persist_directory=str(vect_dir), embedding_function=EMBEDDER)
+
+     print(f"[INFO] No vectorstore found, embedding file: {filename}")
+     with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as tmp:
+         tmp.write(pdf_bytes)
+         tmp_path = tmp.name
+
+     docs = []
+     BAD_PHRASES = {
+         "Abstracting with credit is permitted",
+         "Permission to make digital or hard copies",
+         "arXiv:",
+         "©",
+     }
+
+     def clean_page(text: str) -> str:
+         return "\n".join(
+             line for line in text.splitlines()
+             if not any(b in line for b in BAD_PHRASES)
+         )
+
+     with pdfplumber.open(tmp_path) as pdf:
+         for page_num, page in enumerate(pdf.pages, start=1):
+             text = clean_page(page.extract_text() or "")
+             if not text.strip():
+                 continue
+             # Split into small chunks for embedding. "paragraph_number" is really
+             # the chunk index within the page: the splitter cuts by token count,
+             # not by true paragraph boundaries.
+             splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
+                 chunk_size=1200, chunk_overlap=200
+             )
+             para_chunks = splitter.split_text(text)
+             for para_num, chunk in enumerate(para_chunks, start=1):
+                 docs.append(
+                     Document(
+                         page_content=chunk,
+                         metadata={"page_number": page_num, "paragraph_number": para_num}
+                     )
+                 )
+     print(f"[INFO] Extracted {len(docs)} chunks from PDF for embedding.")
+     vectordb = Chroma.from_documents(
+         docs, EMBEDDER, persist_directory=str(vect_dir)
+     )
+     vectordb.persist()
+     return vectordb
+
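+ # ────────────── Retrieval tool: tag every hit with [Page X, Paragraph Y] ──────────────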
+ from langchain.tools import Tool
+
+ def build_retriever_tool(vectorstore):
+     # 1) build a retriever (here we ask for the top 3 matches)
+     retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
+
+     # 2) wrap it so every result is tagged with page/paragraph
+     def custom_search(query: str) -> str:
+         docs = retriever.get_relevant_documents(query)
+         if not docs:
+             return "No relevant passages found."
+         out = []
+         for d in docs:
+             page = d.metadata.get("page_number", "?")
+             para = d.metadata.get("paragraph_number", "?")
+             txt = d.page_content.replace("\n", " ").strip()
+             out.append(f"[Page {page}, Paragraph {para}]: {txt}")
+         # join with blank lines so the LLM can see separate chunks
+         return "\n\n".join(out)
+
+     # 3) expose that wrapper as a LangChain Tool
+     return Tool(
+         name="document_search",
+         func=custom_search,
+         description=(
+             "Searches the uploaded PDF for a query and returns each matching "
+             "passage prefixed with its page and paragraph number."
+         ),
+     )
+
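+ # First graph node: the LLM sees the conversation with document_search bound as a
+ # tool and either emits a tool call (routing to retrieval) or answers directly.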
+ def make_generate_query_or_respond(retriever_tool):
+     def generate_query_or_respond(state):
+         response = (
+             LLM
+             .bind_tools([retriever_tool]).invoke(state["messages"])
+         )
+         return {"messages": [response]}
+     return generate_query_or_respond
+
+ GENERATE_PROMPT = (
+     "You are an assistant for question-answering tasks. "
+     "Use the following pieces of retrieved context to answer the question, citing the page and paragraph number. "
+     "Pay attention to the context, and only use it to answer the question. "
+     "If you don't know the answer, just say that you don't know. "
+     "Question: {question} \n"
+     "Context: {context}"
+ )
+
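+ # Final answer node: plugs the question and the citation-prefixed tool output
+ # into GENERATE_PROMPT and asks the LLM for a grounded answer.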
+ def generate_answer(state: MessagesState):
+     print(f"[DEBUG] Answer node, messages so far: {state['messages']}")
+     question = state["messages"][0].content
+     print(f"[DEBUG] Question: {question}")
+     context = state["messages"][-1].content
+     print(f"[DEBUG] Context: {context}")
+     prompt = GENERATE_PROMPT.format(question=question, context=context)
+     response = LLM.invoke([{"role": "user", "content": prompt}])
+     print(f"[DEBUG] LLM final answer: {response}")
+     return {"messages": [response]}
+
+
+ from langgraph.graph import StateGraph, START, END
+ from langgraph.prebuilt import ToolNode
+ from langgraph.prebuilt import tools_condition
+
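+ # Graph layout: START -> generate_query_or_respond; if the LLM requested the tool,
+ # tools_condition routes to "retrieve" and then to generate_answer, otherwise the
+ # direct reply ends the run.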
+ def build_agentic_graph(retriever_tool):
+     workflow = StateGraph(MessagesState)
+
+     workflow.add_node("generate_query_or_respond", make_generate_query_or_respond(retriever_tool))
+     workflow.add_node("retrieve", ToolNode([retriever_tool]))
+     workflow.add_node("generate_answer", generate_answer)
+     workflow.add_edge(START, "generate_query_or_respond")
+     workflow.add_conditional_edges(
+         "generate_query_or_respond",
+         tools_condition,
+         {
+             "tools": "retrieve",
+             END: END,
+         },
+     )
+     workflow.add_edge("retrieve", "generate_answer")
+     workflow.add_edge("generate_answer", END)
+     # workflow.add_edge("retrieve", "agent")  # cycle back for multiple tool use if needed
+     return workflow.compile()
+
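+ # Gradio callback: loads (or reuses) the cached vectorstore for the uploaded PDF,
+ # replays the chat history into the graph state, streams the graph, and returns
+ # the final answer plus the updated history.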
+ def gradio_agentic_rag(pdf_file, question, history=None):
+     pdf_bytes, filename = get_file_bytes_and_name(pdf_file)
+     vectordb = load_or_create_chroma(pdf_bytes, filename)
+     retriever_tool = build_retriever_tool(vectordb)
+     graph = build_agentic_graph(retriever_tool)
+     state_messages = []
+     if history:
+         for turn in history:
+             if isinstance(turn, (list, tuple)):
+                 if turn[0]:
+                     state_messages.append({"role": "user", "content": turn[0]})
+                 if len(turn) > 1 and turn[1]:
+                     state_messages.append({"role": "assistant", "content": turn[1]})
+     state_messages.append({"role": "user", "content": question})
+     state = {"messages": state_messages}
+
+     result = None
+     for chunk in graph.stream(state):
+         print(f"Chunk: {chunk}")
+         for node, update in chunk.items():
+             print(f"Node: {node}, Update: {update}")
+             last_msg = update["messages"][-1]
+             if node == "generate_answer" or (
+                 node == "generate_query_or_respond" and not last_msg.tool_calls
+             ):
+                 result = last_msg.content
+
+     if history is None:
+         history = []
+     history.append([question, result])
+
+     return result, history
+
+
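+ # The extra gr.State input/output pair carries the chat history between calls.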
+ iface = gr.Interface(
+     fn=gradio_agentic_rag,
+     inputs=[
+         gr.File(label="Upload your PDF"),
+         gr.Textbox(label="Ask a question about your PDF"),
+         gr.State()
+     ],
+     outputs=[gr.Textbox(label="Answer from RAG Agent"),
+              gr.State()],
+     title="DocuCite Agent",
+     description="An agentic RAG (Retrieval-Augmented Generation) system that can answer questions about the contents of a PDF document with references to the page and paragraph number.",
+     examples=[
+         ["DocuCite-Agent/paper.pdf", "What is LoRA? please use the tool"],
+     ],
+ )
+
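+ # mcp_server=True in launch() below also exposes the app's function over the
+ # Model Context Protocol; see the mcp-server-track anchor in the README.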
+ if __name__ == "__main__":
+     iface.launch(
+         mcp_server=True,
+         show_error=True,
+         show_api=True
+     )
+
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ gradio
+ pdfplumber
+ numpy
+ torch
+ langchain
+ langchain-community
+ langchain-openai
+ langgraph
+ # likely also needed by app.py (Chroma store, HuggingFaceEmbeddings, tiktoken-based splitter):
+ chromadb
+ sentence-transformers
+ tiktoken