Parthiban97 committed
Commit 47270be · verified · 1 Parent(s): 283c85a

Upload 3 files
Files changed (3)
  1. .streamlit/config.toml +17 -0
  2. app.py +264 -0
  3. requirements.txt +7 -0
.streamlit/config.toml ADDED
@@ -0,0 +1,17 @@
+ [theme]
+ primaryColor="#c77f7f"
+ backgroundColor="#af99f1"
+ secondaryBackgroundColor="#eedacf"
+ textColor="#0e1212"
+ font="monospace"
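Streamlit picks this file up automatically from .streamlit/config.toml at startup, so app.py never has to load it. As a quick runtime check, st.get_option exposes the resolved theme values; a minimal sketch, run from inside any page of the app:

import streamlit as st

# The option keys mirror the [theme] entries in .streamlit/config.toml
st.write(st.get_option("theme.primaryColor"))  # "#c77f7f"
st.write(st.get_option("theme.font"))          # "monospace"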
app.py ADDED
@@ -0,0 +1,264 @@
+ import streamlit as st
+ import os
+ import tempfile
+ from dotenv import load_dotenv
+ from llama_parse import LlamaParse
+ from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
+ from llama_index.core.chat_engine import CondenseQuestionChatEngine
+ from llama_index.embeddings.gemini import GeminiEmbedding
+ from llama_index.llms.groq import Groq
+ from llama_index.core.retrievers import VectorIndexRetriever
+ from llama_index.core.postprocessor import SimilarityPostprocessor
+ from llama_index.core.query_engine import RetrieverQueryEngine
+ from llama_index.core.memory import ChatMemoryBuffer
+ import time
+
+ load_dotenv()
+
+ st.set_page_config(page_title="Chat with Documents", page_icon=":books:")
+ st.title("DocMulti Chat Assistant Using LlamaIndex 🦙")
+
+ # Initialize chat history in session state
+ if 'chat_history' not in st.session_state:
+     st.session_state.chat_history = []
+
+ # Initialize the conversation memory buffer once per session
+ if 'memory' not in st.session_state:
+     st.session_state.memory = ChatMemoryBuffer.from_defaults(token_limit=4090)
+
+ # File types accepted by the uploader
+ SUPPORTED_EXTENSIONS = [
+     '.pdf', '.602', '.abw', '.cgm', '.cwk', '.doc', '.docx', '.docm', '.dot', '.dotm',
+     '.hwp', '.key', '.lwp', '.mw', '.mcw', '.pages', '.pbd', '.ppt', '.pptm', '.pptx',
+     '.pot', '.potm', '.potx', '.rtf', '.sda', '.sdd', '.sdp', '.sdw', '.sgl', '.sti',
+     '.sxi', '.sxw', '.stw', '.sxg', '.txt', '.uof', '.uop', '.uot', '.vor', '.wpd',
+     '.wps', '.xml', '.zabw', '.epub', '.jpg', '.jpeg', '.png', '.gif', '.bmp', '.svg',
+     '.tiff', '.webp', '.htm', '.html', '.xlsx', '.xls', '.xlsm', '.xlsb', '.xlw', '.csv',
+     '.dif', '.sylk', '.slk', '.prn', '.numbers', '.et', '.ods', '.fods', '.uos1', '.uos2',
+     '.dbf', '.wk1', '.wk2', '.wk3', '.wk4', '.wks', '.123', '.wq1', '.wq2', '.wb1', '.wb2',
+     '.wb3', '.qpw', '.xlr', '.eth', '.tsv'
+ ]
+
+ # Sidebar configuration ('config' is never written to session state, so this
+ # block re-renders on every rerun, which keeps the widgets alive)
+ if 'config' not in st.session_state:
+     with st.sidebar:
+         st.header("Configuration")
+         st.markdown("Enter your API keys below:")
+
+         # GROQ API key input
+         st.session_state.groq_api_key = st.text_input(
+             "Enter your GROQ API Key",
+             type="password",
+             help="Get your API key from [GROQ Console](https://console.groq.com/keys)",
+             value=st.session_state.get('groq_api_key', '')
+         )
+
+         # Google API key input
+         st.session_state.google_api_key = st.text_input(
+             "Enter your Google API Key",
+             type="password",
+             help="Get your API key from [Google AI Studio](https://aistudio.google.com/app/apikey)",
+             value=st.session_state.get('google_api_key', '')
+         )
+
+         # Llama Cloud API key input
+         st.session_state.llama_cloud_api_key = st.text_input(
+             "Enter your Llama Cloud API Key",
+             type="password",
+             help="Get your API key from [Llama Cloud](https://cloud.llamaindex.ai/api-key)",
+             value=st.session_state.get('llama_cloud_api_key', '')
+         )
+
+         # Expose the keys to the client libraries via environment variables
+         os.environ["GROQ_API_KEY"] = st.session_state.groq_api_key
+         os.environ["GOOGLE_API_KEY"] = st.session_state.google_api_key
+         os.environ["LLAMA_CLOUD_API_KEY"] = st.session_state.llama_cloud_api_key
+
+         # Model selection
+         model_options = [
+             "llama-3.1-70b-versatile",
+             "llama-3.1-8b-instant",
+             "llama3-8b-8192",
+             "llama3-70b-8192",
+             "mixtral-8x7b-32768",
+             "gemma2-9b-it"
+         ]
+         st.session_state.selected_model = st.selectbox(
+             "Select a Groq model",
+             model_options
+         )
+
+         # Document upload
+         st.session_state.uploaded_files = st.file_uploader(
+             "Choose files",
+             accept_multiple_files=True,
+             type=SUPPORTED_EXTENSIONS,
+             key="file_uploader"
+         )
+
+         # Checkbox for LlamaParse usage
+         st.session_state.use_llama_parse = st.checkbox(
+             "Use LlamaParse for complex documents (graphs, tables, etc.)",
+             value=st.session_state.get('use_llama_parse', False)
+         )
+
+         with st.expander("Advanced Options"):
+             # Parsing instruction input
+             st.session_state.parsing_instruction = st.text_area(
+                 "Custom Parsing Instruction",
+                 value=st.session_state.get('parsing_instruction', "Extract all information"),
+                 help="Enter custom instructions for document parsing"
+             )
+
+             # Custom prompt template input
+             st.session_state.custom_prompt_template = st.text_area(
+                 "Custom Prompt Template",
+                 placeholder="Enter your custom prompt here... (optional)",
+                 value=st.session_state.get('custom_prompt_template', '')
+             )
+
+ # Load, parse, and index the uploaded documents
+ def parse_and_index_documents(uploaded_files, use_llama_parse, parsing_instruction):
+     all_documents = []
+
+     if use_llama_parse and os.environ.get("LLAMA_CLOUD_API_KEY"):
+         with st.spinner("Using LlamaParse for document parsing"):
+             parser = LlamaParse(result_type="markdown", parsing_instruction=parsing_instruction)
+             for uploaded_file in uploaded_files:
+                 file_info_placeholder = st.empty()
+                 file_info_placeholder.info(f"Processing file: {uploaded_file.name}")
+                 # LlamaParse needs a path on disk, so spill the upload to a temp file
+                 with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_file.name)[-1]) as tmp_file:
+                     tmp_file.write(uploaded_file.getvalue())
+                     tmp_file_path = tmp_file.name
+
+                 try:
+                     parsed_documents = parser.load_data(tmp_file_path)
+                     all_documents.extend(parsed_documents)
+                 except Exception as e:
+                     st.error(f"Error parsing {uploaded_file.name}: {str(e)}")
+                 finally:
+                     os.remove(tmp_file_path)
+                 time.sleep(4)
+                 file_info_placeholder.empty()
+     else:
+         with st.spinner("Using SimpleDirectoryReader for document parsing"):
+             for uploaded_file in uploaded_files:
+                 file_info_placeholder = st.empty()
+                 file_info_placeholder.info(f"Processing file: {uploaded_file.name}")
+                 with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_file.name)[-1]) as tmp_file:
+                     tmp_file.write(uploaded_file.getvalue())
+                     tmp_file_path = tmp_file.name
+
+                 try:
+                     reader = SimpleDirectoryReader(input_files=[tmp_file_path])
+                     docs = reader.load_data()
+                     all_documents.extend(docs)
+                 except Exception as e:
+                     st.error(f"Error loading {uploaded_file.name}: {str(e)}")
+                 finally:
+                     os.remove(tmp_file_path)
+                 time.sleep(4)
+                 file_info_placeholder.empty()
+
+     if not all_documents:
+         st.error("No valid documents found.")
+         return None
+
+     with st.spinner("Creating Vector Store Index..."):
+         try:
+             groq_llm = Groq(model=st.session_state.selected_model)
+             gemini_embed_model = GeminiEmbedding(model_name="models/embedding-001")
+
+             Settings.llm = groq_llm
+             Settings.embed_model = gemini_embed_model
+             Settings.chunk_size = 2048
+
+             index = VectorStoreIndex.from_documents(all_documents, embed_model=gemini_embed_model)
+
+             # Create a retriever from the index
+             retriever = VectorIndexRetriever(index=index, similarity_top_k=2)
+
+             # Drop retrieved nodes that fall below the similarity cutoff
+             postprocessor = SimilarityPostprocessor(similarity_cutoff=0.50)
+
+             # Create the query engine
+             query_engine = RetrieverQueryEngine(
+                 retriever=retriever,
+                 node_postprocessors=[postprocessor]
+             )
+
+             # Build the chat engine directly on the custom query engine so the
+             # retriever and postprocessor above are actually used; condense_question
+             # mode rewrites each follow-up into a standalone query using the memory
+             chat_engine = CondenseQuestionChatEngine.from_defaults(
+                 query_engine=query_engine,
+                 memory=st.session_state.memory,
+                 verbose=False
+             )
+             return chat_engine
+
+         except Exception as e:
+             st.error(f"Error building index: {str(e)}")
+             return None
+
+
+ # Start document indexing
+ if st.sidebar.button("Start Document Indexing"):
+     if st.session_state.uploaded_files:
+         try:
+             chat_engine = parse_and_index_documents(
+                 st.session_state.uploaded_files,
+                 st.session_state.use_llama_parse,
+                 st.session_state.parsing_instruction
+             )
+             if chat_engine:
+                 st.session_state.chat_engine = chat_engine
+                 st.success("Data processed. Ready to answer your questions!")
+             else:
+                 st.error("Failed to create data index store.")
+         except Exception as e:
+             st.error(f"An error occurred during indexing: {str(e)}")
+     else:
+         st.warning("Please upload at least one file.")
+
+ # Querying logic
+ def get_response(query, chat_engine, custom_prompt):
+     try:
+         # Prepend the custom prompt template, if one was provided
+         if custom_prompt:
+             query = f"{custom_prompt}\n\nQuestion: {query}"
+
+         # Use the chat engine to get a response
+         response = chat_engine.chat(query)
+
+         # Guard against an empty or invalid response
+         if not response or not response.response:
+             return "I couldn't find a relevant answer. Could you rephrase?"
+
+         return response.response
+     except Exception as e:
+         st.error(f"Error processing query: {str(e)}")
+         return "An error occurred."
+
+
+ st.markdown("---")
+ user_query = st.chat_input("Enter Your Question")
+
+ if user_query and "chat_engine" in st.session_state:
+     # Add the user's message to chat history
+     st.session_state.chat_history.append({"role": "user", "content": user_query})
+
+     # Get a response from the chat engine
+     response = get_response(user_query, st.session_state.chat_engine, st.session_state.custom_prompt_template)
+
+     if response:
+         # Add the assistant's response to chat history
+         st.session_state.chat_history.append({"role": "assistant", "content": str(response)})
+
+     # Display chat history
+     for message in st.session_state.chat_history:
+         if message["role"] == "user":
+             st.chat_message("user").write(message["content"])
+         elif message["role"] == "assistant":
+             st.chat_message("assistant").write(message["content"])
+ elif user_query:
+     # Only warn when a question arrives before any documents have been indexed
+     st.warning("Unable to process the query. Please index your documents first.")
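For reference, the Groq-plus-Gemini retrieval path that parse_and_index_documents wires together, reduced to a standalone sketch outside Streamlit. The docs/ folder and the model choice here are illustrative assumptions, and GROQ_API_KEY / GOOGLE_API_KEY must already be set in the environment:

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.embeddings.gemini import GeminiEmbedding
from llama_index.llms.groq import Groq

# Same global configuration the app applies before indexing
Settings.llm = Groq(model="llama-3.1-8b-instant")  # illustrative model pick
Settings.embed_model = GeminiEmbedding(model_name="models/embedding-001")
Settings.chunk_size = 2048

documents = SimpleDirectoryReader("docs").load_data()  # assumed local folder
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(similarity_top_k=2)
print(query_engine.query("Summarize the uploaded documents."))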
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ streamlit
+ llama-index
+ llama-parse
+ openai
+ pypdf
+ python-dotenv
+ llama-index-llms-groq
+ llama-index-llms-gemini
+ llama-index-embeddings-gemini
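After pip install -r requirements.txt, a quick sanity check that everything resolves; a hedged sketch, useful because several of these packages install under import names that differ from their PyPI names (python-dotenv imports as dotenv, llama-parse as llama_parse):

import importlib

# PyPI name and import name differ for several entries in requirements.txt
for module in ("streamlit", "dotenv", "llama_parse", "llama_index",
               "llama_index.llms.groq", "llama_index.embeddings.gemini"):
    importlib.import_module(module)
print("All imports resolved.")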