Upload llmdolphin.py
Browse files — llmdolphin.py (+10 lines, −13 lines)
llmdolphin.py
CHANGED
|
@@ -1328,10 +1328,8 @@ def dolphin_respond(
|
|
| 1328 |
try:
|
| 1329 |
progress(0, desc="Processing...")
|
| 1330 |
override_llm_format = get_state(state, "override_llm_format")
|
| 1331 |
-
if override_llm_format:
|
| 1332 |
-
|
| 1333 |
-
else:
|
| 1334 |
-
chat_template = llm_models[model][1]
|
| 1335 |
|
| 1336 |
llm = Llama(
|
| 1337 |
model_path=str(Path(f"{llm_models_dir}/{model}")),
|
|
@@ -1431,11 +1429,11 @@ def dolphin_respond_auto(
|
|
| 1431 |
#if not is_japanese(message): return [(None, None)]
|
| 1432 |
progress(0, desc="Processing...")
|
| 1433 |
|
|
|
|
|
|
|
| 1434 |
override_llm_format = get_state(state, "override_llm_format")
|
| 1435 |
-
if override_llm_format:
|
| 1436 |
-
|
| 1437 |
-
else:
|
| 1438 |
-
chat_template = llm_models[model][1]
|
| 1439 |
|
| 1440 |
llm = Llama(
|
| 1441 |
model_path=str(Path(f"{llm_models_dir}/{model}")),
|
|
@@ -1499,9 +1497,10 @@ def dolphin_respond_auto(
|
|
| 1499 |
def dolphin_parse_simple(
|
| 1500 |
message: str,
|
| 1501 |
history: list[tuple[str, str]],
|
| 1502 |
-
state: dict,
|
| 1503 |
):
|
| 1504 |
try:
|
|
|
|
| 1505 |
#if not is_japanese(message): return message
|
| 1506 |
dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
|
| 1507 |
if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1: return message
|
|
@@ -1539,10 +1538,8 @@ def respond_playground(
|
|
| 1539 |
):
|
| 1540 |
try:
|
| 1541 |
override_llm_format = get_state(state, "override_llm_format")
|
| 1542 |
-
if override_llm_format:
|
| 1543 |
-
|
| 1544 |
-
else:
|
| 1545 |
-
chat_template = llm_models[model][1]
|
| 1546 |
|
| 1547 |
llm = Llama(
|
| 1548 |
model_path=str(Path(f"{llm_models_dir}/{model}")),
|
|
|
|
| 1328 |
try:
|
| 1329 |
progress(0, desc="Processing...")
|
| 1330 |
override_llm_format = get_state(state, "override_llm_format")
|
| 1331 |
+
if override_llm_format: chat_template = override_llm_format
|
| 1332 |
+
else: chat_template = llm_models[model][1]
|
|
|
|
|
|
|
| 1333 |
|
| 1334 |
llm = Llama(
|
| 1335 |
model_path=str(Path(f"{llm_models_dir}/{model}")),
|
|
|
|
| 1429 |
#if not is_japanese(message): return [(None, None)]
|
| 1430 |
progress(0, desc="Processing...")
|
| 1431 |
|
| 1432 |
+
print(state)#
|
| 1433 |
+
|
| 1434 |
override_llm_format = get_state(state, "override_llm_format")
|
| 1435 |
+
if override_llm_format: chat_template = override_llm_format
|
| 1436 |
+
else: chat_template = llm_models[model][1]
|
|
|
|
|
|
|
| 1437 |
|
| 1438 |
llm = Llama(
|
| 1439 |
model_path=str(Path(f"{llm_models_dir}/{model}")),
|
|
|
|
| 1497 |
def dolphin_parse_simple(
|
| 1498 |
message: str,
|
| 1499 |
history: list[tuple[str, str]],
|
| 1500 |
+
state: dict = {},
|
| 1501 |
):
|
| 1502 |
try:
|
| 1503 |
+
print(state)#
|
| 1504 |
#if not is_japanese(message): return message
|
| 1505 |
dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
|
| 1506 |
if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1: return message
|
|
|
|
| 1538 |
):
|
| 1539 |
try:
|
| 1540 |
override_llm_format = get_state(state, "override_llm_format")
|
| 1541 |
+
if override_llm_format: chat_template = override_llm_format
|
| 1542 |
+
else: chat_template = llm_models[model][1]
|
|
|
|
|
|
|
| 1543 |
|
| 1544 |
llm = Llama(
|
| 1545 |
model_path=str(Path(f"{llm_models_dir}/{model}")),
|