Update App_Function_Libraries/RAG/RAG_Libary_2.py
App_Function_Libraries/RAG/RAG_Libary_2.py
CHANGED
@@ -35,6 +35,63 @@ config.read('config.txt')
 
 
 
+# RAG Search with keyword filtering
+def enhanced_rag_pipeline(query: str, api_choice: str, keywords: str = None) -> Dict[str, Any]:
+    try:
+        # Load embedding provider from config, or fallback to 'openai'
+        embedding_provider = config.get('Embeddings', 'provider', fallback='openai')
+
+        # Log the provider used
+        logging.debug(f"Using embedding provider: {embedding_provider}")
+
+        # Process keywords if provided
+        keyword_list = [k.strip().lower() for k in keywords.split(',')] if keywords else []
+        logging.debug(f"enhanced_rag_pipeline - Keywords: {keyword_list}")
+
+        # Fetch relevant media IDs based on keywords if keywords are provided
+        relevant_media_ids = fetch_relevant_media_ids(keyword_list) if keyword_list else None
+        logging.debug(f"enhanced_rag_pipeline - relevant media IDs: {relevant_media_ids}")
+
+        # Perform vector search
+        vector_results = perform_vector_search(query, relevant_media_ids)
+        logging.debug(f"enhanced_rag_pipeline - Vector search results: {vector_results}")
+
+        # Perform full-text search
+        fts_results = perform_full_text_search(query, relevant_media_ids)
+        logging.debug(f"enhanced_rag_pipeline - Full-text search results: {fts_results}")
+
+        # Combine results
+        all_results = vector_results + fts_results
+        # FIXME
+        if not all_results:
+            logging.info(f"No results found. Query: {query}, Keywords: {keywords}")
+            return {
+                "answer": "I couldn't find any relevant information based on your query and keywords.",
+                "context": ""
+            }
+
+        # FIXME - Apply Re-Ranking of results here
+        apply_re_ranking = False
+        if apply_re_ranking:
+            # Implement re-ranking logic here
+            pass
+        # Extract content from results
+        context = "\n".join([result['content'] for result in all_results[:10]])  # Limit to top 10 results
+        logging.debug(f"Context length: {len(context)}")
+        logging.debug(f"Context: {context[:200]}")
+        # Generate answer using the selected API
+        answer = generate_answer(api_choice, context, query)
+
+        return {
+            "answer": answer,
+            "context": context
+        }
+    except Exception as e:
+        logging.error(f"Error in enhanced_rag_pipeline: {str(e)}")
+        return {
+            "answer": "An error occurred while processing your request.",
+            "context": ""
+        }
 
 
 
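The new function resolves its embedding provider with config.get('Embeddings', 'provider', fallback='openai'), reading the config.txt that the module loads at import time (config.read('config.txt') is visible in the hunk context). A minimal sketch of that lookup follows; the [Embeddings] section layout is inferred from the get() call, not copied from the repository:

import configparser

# Assumed config.txt contents, inferred from the get() call above:
#
#   [Embeddings]
#   provider = openai

config = configparser.ConfigParser()
config.read('config.txt')

# Mirrors the lookup in enhanced_rag_pipeline: configparser returns the
# fallback value instead of raising when the section or key is missing.
embedding_provider = config.get('Embeddings', 'provider', fallback='openai')
print(embedding_provider)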
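Re-ranking is left as a FIXME behind the apply_re_ranking = False flag. One dependency-free way to fill that gap would be to sort the combined results by token overlap with the query. This sketch is illustrative, not part of the commit: it assumes only the 'content' key the pipeline already reads, and a production version would more likely use a cross-encoder or the retrievers' own scores:

from typing import Any, Dict, List

def re_rank_results(query: str, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    # Score each result by how many query tokens appear in its content,
    # then order best-first. Hypothetical placeholder for the FIXME.
    query_terms = set(query.lower().split())

    def overlap(result: Dict[str, Any]) -> int:
        return len(query_terms & set(result['content'].lower().split()))

    return sorted(results, key=overlap, reverse=True)

Dropped in where the FIXME sits, it would replace the pass with all_results = re_rank_results(query, all_results) before the top-10 slice.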
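Call-side, keywords is a single comma-separated string (the function strips and lower-cases each entry itself), and the return value is always a dict with answer and context keys, even on error. A hypothetical invocation; the "openai" api_choice value is an assumption and must match whatever generate_answer accepts in this module:

result = enhanced_rag_pipeline(
    query="What are the main points of the interview?",
    api_choice="openai",            # assumed; must be a provider generate_answer knows
    keywords="interview, summary",  # parsed by split(',') inside the function
)
print(result["answer"])
print(result["context"][:200])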