Update config.txt
config.txt CHANGED: +4 -4
@@ -53,14 +53,14 @@ video_summarize_prompt = "Above is the transcript of a video. Please read throug
 type = sqlite
 sqlite_path = Databases/media_summary.db
 backup_path = ./tldw_DB_Backups/
-#Path to the backup location for the database. If the path does not exist, the backup will not be created.
 elasticsearch_host = localhost
 elasticsearch_port = 9200
-# Additionally you can use elasticsearch as the database type, just replace `sqlite` with `elasticsearch` for `type` and provide the `elasticsearch_host` and `elasticsearch_port` of your configured ES instance.
 chroma_db_path = Databases/chroma_db
 prompts_db_path = Databases/prompts.db
 rag_qa_db_path = Databases/RAG_QA_Chat.db
 character_db_path = Databases/chatDB.db
+#Path to the backup location for the database. If the path does not exist, the backup will not be created.
+# Additionally you can use elasticsearch as the database type, just replace `sqlite` with `elasticsearch` for `type` and provide the `elasticsearch_host` and `elasticsearch_port` of your configured ES instance.
 
 [Embeddings]
 embedding_provider = openai
@@ -77,13 +77,13 @@ overlap = 200
 
 [Chunking]
 method = words
-# 'method' Can be 'words' / 'sentences' / 'paragraphs' / 'semantic' / 'tokens'
 max_size = 400
 overlap = 200
 adaptive = false
-# Use ntlk+punkt to split text into sentences and then ID average sentence length and set that as the chunk size
 multi_level = false
 language = english
+# 'method' Can be 'words' / 'sentences' / 'paragraphs' / 'semantic' / 'tokens'
+# Use ntlk+punkt to split text into sentences and then ID average sentence length and set that as the chunk size
 
 [Metrics]
 log_file_path =
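The comments relocated in the first hunk describe how the database settings behave: the backup is skipped when backup_path does not exist, and the same keys can drive an Elasticsearch backend by changing type from sqlite to elasticsearch and supplying elasticsearch_host and elasticsearch_port. A minimal Python sketch of a loader that follows those comments is below; it assumes the keys sit under a [Database] section (the section header is outside this hunk), that config.txt is read from the working directory, and that the elasticsearch-py client is the ES driver. None of that scaffolding is confirmed by the diff itself.

import configparser
import sqlite3
from pathlib import Path

config = configparser.ConfigParser()
config.read("config.txt")

db = config["Database"]  # assumed section name; not visible in this hunk
db_type = db.get("type", "sqlite")

if db_type == "sqlite":
    conn = sqlite3.connect(db["sqlite_path"])
elif db_type == "elasticsearch":
    # Per the comment: replace `sqlite` with `elasticsearch` for `type` and
    # point elasticsearch_host / elasticsearch_port at a configured ES instance.
    from elasticsearch import Elasticsearch  # assumed client library
    conn = Elasticsearch(f"http://{db['elasticsearch_host']}:{db['elasticsearch_port']}")

# The backup comment says no backup is created if the path does not exist,
# so a loader would check for the directory rather than create it.
backup_dir = Path(db["backup_path"])
can_backup = backup_dir.exists()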
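The two comments relocated in the [Chunking] hunk document the method options ('words' / 'sentences' / 'paragraphs' / 'semantic' / 'tokens') and what the adaptive flag is meant to do: split the text into sentences with NLTK's punkt tokenizer ('ntlk' in the comment is a typo for nltk) and use the average sentence length as the chunk size. A rough sketch of that adaptive rule follows; the function name and the choice of measuring length in words (to line up with method = words and max_size = 400) are assumptions, not code from the repository.

import nltk

nltk.download("punkt", quiet=True)  # punkt sentence-tokenizer models

def adaptive_chunk_size(text: str, fallback: int = 400) -> int:
    # Split into sentences, then use the average sentence length (in words)
    # as the chunk size, as the comment describes; fall back to the
    # configured max_size when there is nothing to measure.
    sentences = nltk.sent_tokenize(text, language="english")
    if not sentences:
        return fallback
    avg_words = sum(len(s.split()) for s in sentences) / len(sentences)
    return max(1, round(avg_words))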