Spaces:
Sleeping
Sleeping
Commit
·
cd9c23a
1
Parent(s):
c6ae2c4
App Added
Browse files- app.py +47 -0
- requirements.txt +7 -0
- responses.json +0 -0
app.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json

import streamlit as st
from haystack.document_stores import FAISSDocumentStore
from haystack.document_stores import InMemoryDocumentStore
from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
from haystack.nodes import DensePassageRetriever
from haystack.nodes import FARMReader
from haystack.pipelines import ExtractiveQAPipeline

st.header("DPR on Supreme Court Judgements (Capital Gain)")


@st.cache_resource
def _build_pipeline():
    """Build and cache the extractive-QA pipeline for the app's lifetime.

    Streamlit re-executes the whole script on every widget interaction;
    without caching, each keystroke/submit would re-read the corpus,
    re-embed every document, and reload the DPR + reader models.

    Returns:
        ExtractiveQAPipeline wired with a DPR retriever over a FAISS store
        and a BERT SQuAD2 reader.
    """
    with open("responses.json", "r", encoding="utf-8") as f:
        data = json.load(f)

    # Map the corpus records into Haystack's document dict shape.
    # Assumes each record has "text", "title" and "url" keys — matches
    # how responses.json is consumed below; verify if the file changes.
    documents = [
        {
            "content": doc["text"],
            "meta": {
                "name": doc["title"],
                "url": doc["url"],
            },
        }
        for doc in data
    ]

    # 768-dim embeddings match the DPR encoders used below; "Flat" index
    # gives exact (non-approximate) nearest-neighbour search.
    document_store = FAISSDocumentStore(embedding_dim=768, faiss_index_factory_str="Flat")
    # document_store = InMemoryDocumentStore()
    document_store.write_documents(documents)

    retriever = DensePassageRetriever(
        document_store=document_store,
        query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
        passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
    )

    # Compute and persist passage embeddings so the index can be reloaded.
    document_store.update_embeddings(retriever)
    document_store.save("faiss_index")

    reader = FARMReader(model_name_or_path="deepset/bert-base-cased-squad2")

    return ExtractiveQAPipeline(reader=reader, retriever=retriever)


pipeline = _build_pipeline()

# BUG FIX: st.input() does not exist in the Streamlit API — the free-text
# widget is st.text_input(). The original crashed with AttributeError here.
query = st.text_input("Enter your question")
# query = "What is the subject matter of the petition in the Sadanand S. Varde case?"

# Guard against the empty query that every first render produces: running
# the pipeline on "" would burn a full retrieve+read pass for nothing.
if query:
    result = pipeline.run(query=query, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}})

    for answer in result['answers']:
        st.markdown(f"=====================\nAnswer: {answer.answer}\nContext: {answer.context}\nScore: {answer.score}")
|
requirements.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit
|
| 2 |
+
faiss-cpu
|
| 3 |
+
transformers
|
| 4 |
+
sentence-transformers
|
| 5 |
+
farm-haystack
|
| 6 |
+
farm-haystack[faiss]
|
| 7 |
+
farm-haystack[inference]
|
responses.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|