import os
import asyncio
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
google_api_key = os.getenv("GOOGLE_API_KEY")
# Event-loop-safe embeddings initializer: some versions of
# GoogleGenerativeAIEmbeddings expect an asyncio event loop in the current
# thread, so create and register one if none exists before constructing it.
def get_embeddings():
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop running in this thread: create and register one.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return GoogleGenerativeAIEmbeddings(
        model="models/embedding-001",
        google_api_key=google_api_key,
    )
# Load the source document
loader = TextLoader("data.txt")
docs = loader.load()
# Split text into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(docs)
# Create vectorstore with embeddings
embeddings = get_embeddings()
db = FAISS.from_documents(documents, embeddings)
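# Optional sketch (not in the original Space): persist the index so embeddings
# are not recomputed on every restart. save_local/load_local are FAISS
# vectorstore methods in langchain_community; the "faiss_index" path and the
# allow_dangerous_deserialization flag (required on recent versions) are
# assumptions here. To use it, replace the from_documents call above with:
#
# if os.path.isdir("faiss_index"):
#     db = FAISS.load_local("faiss_index", embeddings,
#                           allow_dangerous_deserialization=True)
# else:
#     db = FAISS.from_documents(documents, embeddings)
#     db.save_local("faiss_index")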
# Conversational chain
qa = ConversationalRetrievalChain.from_llm(
    ChatGoogleGenerativeAI(model="gemini-1.5-flash", google_api_key=google_api_key),
    db.as_retriever(),
)
# Function to interact with the bot; keeps the conversation history across turns
chat_history = []
def ask_bot(query: str):
    result = qa.invoke({"question": query, "chat_history": chat_history})
    chat_history.append((query, result["answer"]))
    return result["answer"]
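# Example usage (an illustrative sketch, not part of the original file): a
# minimal command-line loop around ask_bot. Assumes GOOGLE_API_KEY is set in
# the environment and that data.txt exists alongside this script.
if __name__ == "__main__":
    print("Ask questions about data.txt (type 'quit' to exit).")
    while True:
        user_input = input("You: ").strip()
        if user_input.lower() in {"quit", "exit"}:
            break
        print("Bot:", ask_bot(user_input))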