Commit · c767532
Parent(s): 2cac745
Integrate Gradio client for LLM predictions and update requirements
- app.py +8 -13
- requirements.txt +1 -0
app.py
CHANGED
@@ -15,8 +15,10 @@ from trellis.pipelines import TrellisImageTo3DPipeline
 from trellis.representations import Gaussian, MeshExtractResult
 from trellis.utils import render_utils, postprocessing_utils
 from huggingface_hub import InferenceClient
+from gradio_client import Client
 
 client = InferenceClient(api_key=os.environ["HF_API_KEY"])
+llm_client = Client("Qwen/Qwen2.5-72B-Instruct")
 
 def generate_t2i_prompt(item_name):
     llm_prompt_template = """You are tasked with creating a concise yet highly detailed description of an item to be used for generating an image in a game development pipeline. The image should show the **entire item** with no parts cropped or hidden. The background should always be plain and monocolor, with no focus on it.
@@ -37,19 +39,12 @@ Now generate a concise description for the item: "{item_name}"
 Focus on the item itself, ensuring it is fully described, and specify a plain, white background and the output is no longer than 77 tokens.
 """
 
-    messages = [
-        {
-            "role": "user",
-            "content": llm_prompt_template.format(item_name=item_name)
-        }
-    ]
-
-    completion = client.chat.completions.create(
-        model="Qwen/Qwen2.5-72B-Instruct",
-        messages=messages,
-        max_tokens=500
-    )
-    object_t2i_prompt = completion.choices[0].message.content
+    object_t2i_prompt = llm_client.predict(
+        query=llm_prompt_template.format(item_name=item_name),
+        history=[],
+        system="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
+        api_name="/model_chat",
+    )[1][0][-1]
     print(object_t2i_prompt)
 
     return object_t2i_prompt
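For reference, the new path calls the hosted Qwen/Qwen2.5-72B-Instruct Space through gradio_client instead of the Inference API. Below is a minimal standalone sketch of that call pattern; the ask_qwen helper and the sample prompt are illustrative only, and the final indexing assumes the Space's /model_chat endpoint returns the updated chat history as its second output, which is what the diff's [1][0][-1] implies.

from gradio_client import Client

# Same public Space the diff points at.
llm_client = Client("Qwen/Qwen2.5-72B-Instruct")

def ask_qwen(prompt: str) -> str:
    # /model_chat takes a query, an (optionally empty) chat history, and a
    # system prompt. The result is indexed as in the diff: output 1 is assumed
    # to be the chat history, [0] its first (user, assistant) pair, and [-1]
    # the assistant's reply text.
    result = llm_client.predict(
        query=prompt,
        history=[],
        system="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
        api_name="/model_chat",
    )
    return result[1][0][-1]

if __name__ == "__main__":
    print(ask_qwen("Describe a bronze shield for a fantasy game in one sentence."))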
requirements.txt
CHANGED
@@ -1,6 +1,7 @@
 --extra-index-url https://download.pytorch.org/whl/cu121
 --find-links https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.4.0_cu121.html
 
+gradio_client
 huggingface-hub==0.26.5
 torch==2.4.0
 torchvision==0.19.0
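Note that gradio_client is added without a version pin, unlike the other dependencies. A quick, purely illustrative way to confirm which versions the environment resolved (not part of the commit):

# Illustrative check only.
import gradio_client
import huggingface_hub

print("gradio_client", gradio_client.__version__)
print("huggingface-hub", huggingface_hub.__version__)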