Spaces:
Running
Running
Upload folder using huggingface_hub
Browse files- .env +1 -0
- app.py +11 -3
- daily_usage.json +1 -0
.env
CHANGED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
DEEPSEEK_API_KEY=sk-4e8ed6219f2a4a46a0af24eac64d1d90
|
app.py
CHANGED
|
@@ -6,6 +6,8 @@ import json
|
|
| 6 |
import os
|
| 7 |
import datetime
|
| 8 |
from pathlib import Path
|
|
|
|
|
|
|
| 9 |
|
| 10 |
def load_usage():
|
| 11 |
if USAGE_FILE.exists():
|
|
@@ -23,6 +25,8 @@ def save_usage(total_cost):
|
|
| 23 |
}, f)
|
| 24 |
|
| 25 |
load_dotenv(override=True)
|
|
|
|
|
|
|
| 26 |
# client = OpenAI()
|
| 27 |
|
| 28 |
client = OpenAI(
|
|
@@ -32,12 +36,14 @@ client = OpenAI(
|
|
| 32 |
# === CONFIGURABLE BUDGET LIMIT (USD) ===
|
| 33 |
USAGE_LIMIT_PER_DAY = 0.1 # USD
|
| 34 |
USAGE_FILE = Path("daily_usage.json")
|
|
|
|
| 35 |
total_cost = load_usage()
|
| 36 |
|
| 37 |
|
| 38 |
# Pricing reference (as of 2024-06): adjust if needed
|
| 39 |
PRICES = {
|
| 40 |
-
"gpt-4o-mini": {"input": 0.00025 / 1000, "output": 0.00025 / 1000}
|
|
|
|
| 41 |
}
|
| 42 |
|
| 43 |
|
|
@@ -63,6 +69,7 @@ system_prompt += f"\n\n## Summary:\n{summary}\n\n## Resume:\n{resume}\n\n"
|
|
| 63 |
system_prompt += f"With this context, please chat with the user, always staying in character as {name}."
|
| 64 |
def chat(message, history):
|
| 65 |
global total_cost
|
|
|
|
| 66 |
if total_cost >= USAGE_LIMIT_PER_DAY:
|
| 67 |
return "Daily usage limit reached. Please try again tomorrow."
|
| 68 |
|
|
@@ -71,9 +78,10 @@ def chat(message, history):
|
|
| 71 |
usage = response.usage
|
| 72 |
input_tokens = usage.prompt_tokens
|
| 73 |
output_tokens = usage.completion_tokens
|
| 74 |
-
cost = (input_tokens * PRICES["gpt-4o-mini"]["input"] +
|
| 75 |
-
output_tokens * PRICES["gpt-4o-mini"]["output"])
|
| 76 |
total_cost += cost
|
|
|
|
| 77 |
return response.choices[0].message.content
|
| 78 |
gr.ChatInterface(chat, type="messages").launch()
|
| 79 |
|
|
|
|
| 6 |
import os
|
| 7 |
import datetime
|
| 8 |
from pathlib import Path
|
| 9 |
+
print("Current working directory:", os.getcwd())
|
| 10 |
+
|
| 11 |
|
| 12 |
def load_usage():
|
| 13 |
if USAGE_FILE.exists():
|
|
|
|
| 25 |
}, f)
|
| 26 |
|
| 27 |
load_dotenv(override=True)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
# client = OpenAI()
|
| 31 |
|
| 32 |
client = OpenAI(
|
|
|
|
| 36 |
# === CONFIGURABLE BUDGET LIMIT (USD) ===
|
| 37 |
USAGE_LIMIT_PER_DAY = 0.1 # USD
|
| 38 |
USAGE_FILE = Path("daily_usage.json")
|
| 39 |
+
print(f"Expected usage file at: {USAGE_FILE.resolve()}")
|
| 40 |
total_cost = load_usage()
|
| 41 |
|
| 42 |
|
| 43 |
# Pricing reference (as of 2024-06): adjust if needed
|
| 44 |
PRICES = {
|
| 45 |
+
"gpt-4o-mini": {"input": 0.00025 / 1000, "output": 0.00025 / 1000},
|
| 46 |
+
"deepseek-chat": {"input": 0.00027 / 1000, "output": 0.00007 / 1000}
|
| 47 |
}
|
| 48 |
|
| 49 |
|
|
|
|
| 69 |
system_prompt += f"With this context, please chat with the user, always staying in character as {name}."
|
| 70 |
def chat(message, history):
|
| 71 |
global total_cost
|
| 72 |
+
total_cost = load_usage()
|
| 73 |
if total_cost >= USAGE_LIMIT_PER_DAY:
|
| 74 |
return "Daily usage limit reached. Please try again tomorrow."
|
| 75 |
|
|
|
|
| 78 |
usage = response.usage
|
| 79 |
input_tokens = usage.prompt_tokens
|
| 80 |
output_tokens = usage.completion_tokens
|
| 81 |
+
cost = (input_tokens * PRICES["deepseek-chat"]["input"] +
|
| 82 |
+
output_tokens * PRICES["deepseek-chat"]["output"])
|
| 83 |
total_cost += cost
|
| 84 |
+
save_usage(total_cost)
|
| 85 |
return response.choices[0].message.content
|
| 86 |
gr.ChatInterface(chat, type="messages").launch()
|
| 87 |
|
daily_usage.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"date": "2025-07-09", "total_cost": 0.012827749999999999}
|