new_llm
- analyzer.py +3 -2
- chatbot_page.py +7 -4
- test.py +23 -0
analyzer.py
CHANGED

@@ -8,7 +8,8 @@ def analyze_code(code: str) -> str:
     Returns the analysis as a string.
     """
     from openai import OpenAI
-    client = OpenAI()
+    client = OpenAI(api_key=os.getenv("modal_api"))
+    client.base_url = os.getenv("base_url")
     system_prompt = (
         "You are a helpful assistant. Analyze the code given to you. "
         "Return your response strictly in JSON format with the following keys: "
@@ -23,7 +24,7 @@ def analyze_code(code: str) -> str:
         "}"
     )
     response = client.chat.completions.create(
-        model="
+        model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",  # Updated model
         messages=[
             {"role": "system", "content": system_prompt},
             {"role": "user", "content": code}
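Both hunks build the client and then overwrite `client.base_url` after construction. A minimal sketch of an equivalent setup, assuming the same `modal_api` and `base_url` environment variables from the diff: the v1 OpenAI Python SDK also accepts `base_url` as a constructor argument, so the client never points at the default api.openai.com endpoint. (Note that `os.getenv` in analyzer.py also requires `import os` to be in scope; the hunk above does not show one being added.)

```python
# Minimal sketch, assuming the same "modal_api" and "base_url" environment
# variables used in the diff. Passing base_url to the constructor avoids
# mutating the client after it is created.
import os

from openai import OpenAI

client = OpenAI(
    api_key=os.getenv("modal_api"),   # key for the Modal-hosted server
    base_url=os.getenv("base_url"),   # e.g. https://<app-name>.modal.run/v1/
)
```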
chatbot_page.py
CHANGED

@@ -1,4 +1,5 @@
 import gradio as gr
+import os
 # from analyzer import analyze_code
 
 # System prompt for the chatbot
@@ -15,7 +16,8 @@ conversation_history = []
 # Function to handle chat
 def chat_with_user(user_message, history):
     from openai import OpenAI
-    client = OpenAI()
+    client = OpenAI(api_key=os.getenv("modal_api"))
+    client.base_url = os.getenv("base_url")
     # Build the message list for the LLM
     messages = [
         {"role": "system", "content": CHATBOT_SYSTEM_PROMPT}
@@ -26,7 +28,7 @@ def chat_with_user(user_message, history):
         messages.append({"role": "assistant", "content": msg[1]})
     messages.append({"role": "user", "content": user_message})
     response = client.chat.completions.create(
-        model="
+        model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",
        messages=messages,
        max_tokens=256,
        temperature=0.7
@@ -38,7 +40,8 @@ def chat_with_user(user_message, history):
 def extract_keywords_from_conversation(history):
     print("Extracting keywords from conversation...")
     from openai import OpenAI
-    client = OpenAI()
+    client = OpenAI(api_key=os.getenv("modal_api"))
+    client.base_url = os.getenv("base_url")
     # Combine all user and assistant messages into a single string
     conversation = "\n".join([f"User: {msg[0]}\nAssistant: {msg[1]}" for msg in history if msg[1]])
     system_prompt = (
@@ -50,7 +53,7 @@ def extract_keywords_from_conversation(history):
         "Conversation:\n" + conversation + "\n\nExtract about 5 keywords for Hugging Face repo search."
     )
     response = client.chat.completions.create(
-        model="
+        model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",
         messages=[
             {"role": "system", "content": system_prompt},
             {"role": "user", "content": user_prompt}
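For context, `chat_with_user` replays the gradio chat history before appending the new message. The sketch below is a hedged reconstruction, not the file's exact code: it assumes gradio's pair-style history (a list of `(user, assistant)` tuples) and the same environment variables, and the `CHATBOT_SYSTEM_PROMPT` placeholder stands in for the real prompt defined in chatbot_page.py.

```python
# Hedged reconstruction of the chat flow the hunks modify; names and the
# history shape are assumptions based on the surrounding context lines.
import os

from openai import OpenAI

CHATBOT_SYSTEM_PROMPT = "You are a helpful assistant."  # placeholder, not the real prompt

def chat_with_user(user_message, history):
    client = OpenAI(api_key=os.getenv("modal_api"), base_url=os.getenv("base_url"))
    # Replay prior turns so the model sees the whole conversation.
    messages = [{"role": "system", "content": CHATBOT_SYSTEM_PROMPT}]
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": user_message})
    response = client.chat.completions.create(
        model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",
        messages=messages,
        max_tokens=256,
        temperature=0.7,
    )
    return response.choices[0].message.content
```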
test.py
ADDED

@@ -0,0 +1,23 @@
+"""This simple script shows how to interact with an OpenAI-compatible server from a client."""
+
+# import argparse
+
+# import modal
+from openai import OpenAI
+import os
+
+client = OpenAI(api_key=os.getenv("modal_api"))
+client.base_url = (
+    "https://alexprincecursor--example-vllm-openai-compatible-serve.modal.run/v1/"
+)
+
+response = client.chat.completions.create(
+    model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",  # was GPT-4.1 mini
+    messages=[
+        {"role": "system", "content": "You are a rockstar lyric generator. You are given a song and you need to generate a lyric for it."},
+        {"role": "user", "content": "The song is 'Bohemian Rhapsody' by Queen."}
+    ],
+    max_tokens=512,
+    temperature=0.7
+)
+print(response.choices[0].message.content)
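The script reads the key from the environment, so it runs as, for example, `modal_api=<your-key> python test.py`. vLLM's OpenAI-compatible server also supports streaming; a hedged variant of the same request, assuming the deployment above has streaming enabled:

```python
# Streaming variant of the same request; assumes the endpoint supports
# stream=True, which vLLM's OpenAI-compatible server exposes by default.
stream = client.chat.completions.create(
    model="neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w4a16",
    messages=[{"role": "user", "content": "The song is 'Bohemian Rhapsody' by Queen."}],
    max_tokens=512,
    temperature=0.7,
    stream=True,
)
for chunk in stream:
    # Each chunk carries an incremental delta; content can be None on the last chunk.
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
print()
```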