Uamir committed
Commit bfa647e · verified
1 Parent(s): fe5eb32

Update chatbot_backend.py

Files changed (1)
  1. chatbot_backend.py +40 -32
chatbot_backend.py CHANGED
@@ -1,41 +1,49 @@
 import os
-from langchain_community.llms import Ollama
-from langchain_community.embeddings import OllamaEmbeddings
+import asyncio
 from langchain_community.document_loaders import TextLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain_community.vectorstores import FAISS
-from langchain.memory import ConversationBufferMemory
+from langchain.text_splitter import CharacterTextSplitter
+from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
+from langchain.vectorstores import FAISS
 from langchain.chains import ConversationalRetrievalChain
 
-# ✅ Memory
-memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+google_api_key = os.getenv("GOOGLE_API_KEY")
 
-# ✅ LLM (Ollama model, e.g. llama3, mistral, phi3)
-llm = Ollama(model="llama3")
+# 🟢 Event loop safe embeddings initializer
+def get_embeddings():
+    try:
+        asyncio.get_running_loop()
+    except RuntimeError:
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+
+    return GoogleGenerativeAIEmbeddings(
+        model="models/embedding-001",
+        google_api_key=google_api_key
+    )
 
-# ✅ Load + Split Docs
-loader = TextLoader("data.txt", encoding="utf-8")
+# 🟢 Use loader safely
+loader = TextLoader("data.txt")
 docs = loader.load()
-splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
-chunks = splitter.split_documents(docs)
-
-# ✅ Embeddings + VectorStore
-embeddings = OllamaEmbeddings(model="llama3")
-vectorstore = FAISS.from_documents(documents=chunks, embedding=embeddings)
-retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
-
-# ✅ Conversational Retrieval Chain (with memory)
-conversational_chain = ConversationalRetrievalChain.from_llm(
-    llm=llm,
-    retriever=retriever,
-    memory=memory,
-    return_source_documents=False
+
+# 🟢 Split text into chunks
+text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
+documents = text_splitter.split_documents(docs)
+
+# 🟢 Create vectorstore with embeddings
+embeddings = get_embeddings()
+db = FAISS.from_documents(documents, embeddings)
+
+# 🟢 Conversational chain
+qa = ConversationalRetrievalChain.from_llm(
+    ChatGoogleGenerativeAI(model="gemini-2.5-flash", google_api_key=google_api_key),
+    db.as_retriever()
 )
 
-# ✅ Ask bot function
-def ask_bot(query: str) -> str:
-    try:
-        response = conversational_chain.invoke({"question": query})
-        return response["answer"]
-    except Exception as e:
-        return f"Error: {str(e)}"
+# 🟢 Function to interact with bot
+chat_history = []
+
+def ask_bot(query: str):
+    global chat_history
+    result = qa({"question": query, "chat_history": chat_history})
+    chat_history.append((query, result["answer"]))
+    return result["answer"]
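For context on the new API: `ask_bot` now threads conversation state by hand through a module-level `chat_history` list instead of LangChain's `ConversationBufferMemory`, and `get_embeddings` pre-creates an asyncio event loop before constructing `GoogleGenerativeAIEmbeddings`, presumably to guard against "no current event loop" errors when the module is imported from a worker thread. A minimal driver for the updated module might look like the sketch below. It is illustrative only, not part of this commit, and assumes the file is importable as `chatbot_backend`, that `GOOGLE_API_KEY` is exported, and that `data.txt` sits next to the module.

# Hypothetical driver script, not part of this commit.
# Exercises ask_bot() from the updated chatbot_backend module.
from chatbot_backend import ask_bot

if __name__ == "__main__":
    while True:
        query = input("You: ").strip()
        if not query or query.lower() in {"exit", "quit"}:
            break
        # Each call appends (question, answer) to the module-level
        # chat_history list, so follow-up questions see earlier turns.
        print("Bot:", ask_bot(query))

Note that because `chat_history` lives at module scope, history is shared by every caller of `ask_bot`; a per-session store would be needed for multi-user deployments.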