import os
import asyncio

from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from langchain_community.vectorstores import FAISS  # langchain.vectorstores is deprecated
from langchain.chains import ConversationalRetrievalChain

google_api_key = os.getenv("GOOGLE_API_KEY")
# Event-loop-safe embeddings initializer: the Google GenAI client expects a
# running asyncio event loop, which worker threads (e.g. under Streamlit) may lack
def get_embeddings():
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop in this thread: create one and register it before building the client
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return GoogleGenerativeAIEmbeddings(
        model="models/embedding-001",
        google_api_key=google_api_key,
    )
# Load the source document
loader = TextLoader("data.txt")
docs = loader.load()

# Split the text into chunks for embedding
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(docs)

# Build the FAISS vectorstore from the chunks
embeddings = get_embeddings()
db = FAISS.from_documents(documents, embeddings)
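
# Optional sketch (not part of the original app): the index could be persisted
# so it is not rebuilt on every restart. save_local/load_local are standard
# LangChain FAISS methods; the "faiss_index" path and the
# allow_dangerous_deserialization flag (recent langchain-community versions)
# are assumptions here.
# db.save_local("faiss_index")
# db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)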
# Conversational retrieval chain backed by Gemini
qa = ConversationalRetrievalChain.from_llm(
    ChatGoogleGenerativeAI(model="gemini-1.5-flash", google_api_key=google_api_key),
    db.as_retriever(),
)
# Function to interact with the bot, threading chat history through each call
chat_history = []

def ask_bot(query: str):
    global chat_history
    # invoke() is the current Chain entry point; calling the chain directly is deprecated
    result = qa.invoke({"question": query, "chat_history": chat_history})
    chat_history.append((query, result["answer"]))
    return result["answer"]
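
# A minimal sketch of how ask_bot might be exercised from the command line
# (an assumption, not part of the original Space; data.txt must exist and
# GOOGLE_API_KEY must be set). The follow-up question shows chat_history
# carrying context between turns.
if __name__ == "__main__":
    print(ask_bot("What is this document about?"))
    print(ask_bot("Can you expand on that last point?"))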