Alae65 committed on
Commit
0691226
·
verified ·
1 Parent(s): 4ba0b46

Update app.py with complete fal.ai provider code including PIL import

Browse files
Files changed (1) hide show
  1. app.py +42 -91
app.py CHANGED
@@ -1,68 +1,39 @@
1
  import gradio as gr
2
- import requests
3
  import os
4
  import base64
5
  from io import BytesIO
6
  from PIL import Image
7
- import json
8
 
9
- # Hugging Face API configuration
10
  HF_TOKEN = os.environ.get("HF_TOKEN", "")
11
- API_URL = "https://api-inference.huggingface.co/models/tencent/HunyuanImage-3.0"
12
-
13
- headers = {"Authorization": f"Bearer {HF_TOKEN}"}
14
 
15
  def generate_image_api(prompt, seed=42, num_inference_steps=50):
16
- """
17
- Generate image using Hugging Face Inference API
18
- Uses paid API from your HF account balance
19
- """
20
  try:
21
- payload = {
22
- "inputs": prompt,
23
- "parameters": {
24
- "seed": int(seed),
25
- "num_inference_steps": int(num_inference_steps)
26
- }
27
- }
28
-
29
- response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
30
-
31
- if response.status_code == 200:
32
- image = Image.open(BytesIO(response.content))
33
- return image, seed, "Success!"
34
- else:
35
- error_msg = f"API Error: {response.status_code} - {response.text}"
36
- print(error_msg)
37
- placeholder = Image.new('RGB', (1024, 1024), color=(240, 240, 245))
38
- return placeholder, seed, error_msg
39
-
40
  except Exception as e:
41
- error_msg = f"Error: {str(e)}"
42
- print(error_msg)
43
  placeholder = Image.new('RGB', (1024, 1024), color=(240, 240, 245))
44
- return placeholder, seed, error_msg
45
 
46
  def infer(prompt, seed, randomize_seed, diff_infer_steps, image_size):
47
  import random
48
  if randomize_seed:
49
- seed = random.randint(0, 2**32 - 1)
50
-
51
  image, used_seed, status = generate_image_api(prompt, seed, diff_infer_steps)
52
  return image, used_seed, status
53
 
54
  def api_generate(prompt: str, seed: int = 42, num_inference_steps: int = 50):
55
- """
56
- API endpoint for external integrations like n8n
57
- Returns base64 encoded image
58
- """
59
  try:
60
  image, used_seed, status = generate_image_api(prompt, seed, num_inference_steps)
61
-
62
  buffered = BytesIO()
63
  image.save(buffered, format="PNG")
64
  img_str = base64.b64encode(buffered.getvalue()).decode()
65
-
66
  return {
67
  "success": True,
68
  "image_base64": img_str,
@@ -86,30 +57,26 @@ examples = [
86
 
87
  css = """
88
  #col-container {
89
- margin: 0 auto;
90
- max-width: 800px;
91
  }
92
  .note {
93
- background: #fff3cd;
94
- padding: 15px;
95
- border-radius: 8px;
96
- margin: 10px 0;
97
  }
98
  """
99
 
100
  with gr.Blocks(css=css) as demo:
101
  with gr.Column(elem_id="col-container"):
102
- gr.Markdown("# 🎨 HunyuanImage-3.0 Text-to-Image with Inference API")
103
  gr.Markdown(
104
- """### Tencent HunyuanImage-3.0 - Using Paid Hugging Face Inference API
105
-
106
- This Space now uses the Hugging Face Inference API (paid from your account balance)
107
- - Real image generation with HunyuanImage-3.0
108
- - API endpoint available for n8n integration
109
- - Set your HF_TOKEN in Space secrets
110
-
111
- 🔗 For n8n integration: Use the API endpoint at /gradio_api/ with the api_generate function
112
- """,
113
  elem_classes="note"
114
  )
115
 
@@ -121,8 +88,7 @@ with gr.Blocks(css=css) as demo:
121
  placeholder="Enter your prompt for image generation...",
122
  value="A serene mountain landscape with a crystal clear lake"
123
  )
124
-
125
- run_button = gr.Button("🎨 Generate Image", variant="primary")
126
 
127
  result = gr.Image(label="Generated Image", show_label=True)
128
  status_text = gr.Textbox(label="Status", interactive=False)
@@ -135,48 +101,33 @@ with gr.Blocks(css=css) as demo:
135
  step=1,
136
  value=42,
137
  )
138
-
139
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
140
-
141
  diff_infer_steps = gr.Slider(
142
- label="Diffusion inference steps",
143
- minimum=10,
144
  maximum=100,
145
- step=10,
146
  value=50,
147
  )
148
-
149
  image_size = gr.Radio(
150
- label="Image Size",
151
- choices=["auto", "1024x1024", "1280x768", "768x1280"],
152
- value="auto",
153
  )
154
 
155
- gr.Examples(examples=examples, inputs=[prompt])
156
-
157
- run_button.click(
158
- fn=infer,
159
- inputs=[prompt, seed, randomize_seed, diff_infer_steps, image_size],
160
  outputs=[result, seed, status_text],
 
 
161
  )
162
-
163
- api_demo = gr.Interface(
164
- fn=api_generate,
165
- inputs=[
166
- gr.Text(label="Prompt"),
167
- gr.Number(label="Seed", value=42),
168
- gr.Number(label="Inference Steps", value=50)
169
- ],
170
- outputs=gr.JSON(label="Response"),
171
- title="HunyuanImage-3.0 API Endpoint",
172
- description="API endpoint for n8n and other integrations. Returns base64 encoded image."
173
- )
174
-
175
- app = gr.TabbedInterface(
176
- [demo, api_demo],
177
- ["Interface", "API Endpoint"],
178
- title="HunyuanImage-3.0 Generator"
179
- )
180
 
181
  if __name__ == "__main__":
182
- app.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
  import os
4
  import base64
5
  from io import BytesIO
6
  from PIL import Image
 
7
 
 
8
  HF_TOKEN = os.environ.get("HF_TOKEN", "")
9
+ client = InferenceClient(provider="fal-ai", api_key=HF_TOKEN)
 
 
10
 
11
  def generate_image_api(prompt, seed=42, num_inference_steps=50):
 
 
 
 
12
  try:
13
+ image = client.text_to_image(
14
+ prompt,
15
+ model="tencent/HunyuanImage-3.0",
16
+ seed=int(seed),
17
+ num_inference_steps=int(num_inference_steps)
18
+ )
19
+ return image, seed, "Success!"
 
 
 
 
 
 
 
 
 
 
 
 
20
  except Exception as e:
 
 
21
  placeholder = Image.new('RGB', (1024, 1024), color=(240, 240, 245))
22
+ return placeholder, seed, f"Error: {str(e)}"
23
 
24
  def infer(prompt, seed, randomize_seed, diff_infer_steps, image_size):
25
  import random
26
  if randomize_seed:
27
+ seed = random.randint(0, 2 ** 32 - 1)
 
28
  image, used_seed, status = generate_image_api(prompt, seed, diff_infer_steps)
29
  return image, used_seed, status
30
 
31
  def api_generate(prompt: str, seed: int = 42, num_inference_steps: int = 50):
 
 
 
 
32
  try:
33
  image, used_seed, status = generate_image_api(prompt, seed, num_inference_steps)
 
34
  buffered = BytesIO()
35
  image.save(buffered, format="PNG")
36
  img_str = base64.b64encode(buffered.getvalue()).decode()
 
37
  return {
38
  "success": True,
39
  "image_base64": img_str,
 
57
 
58
  css = """
59
  #col-container {
60
+ margin: 0 auto;
61
+ max-width: 800px;
62
  }
63
  .note {
64
+ background: #fff3cd;
65
+ padding: 15px;
66
+ border-radius: 8px;
67
+ margin: 10px 0;
68
  }
69
  """
70
 
71
  with gr.Blocks(css=css) as demo:
72
  with gr.Column(elem_id="col-container"):
73
+ gr.Markdown("# 🎨 HunyuanImage-3.0 Text-to-Image using fal.ai Provider")
74
  gr.Markdown(
75
+ """
76
+ ### Tencent HunyuanImage-3.0 - Using fal.ai Inference Provider
77
+ هذه المساحة الآن تولّد صور حقيقية فعلياً عبر مزود fal.ai
78
+ 🔗 كل شيء يعمل تلقائياً باستخدام التوكن HF_TOKEN من أسرارك.
79
+ """,
 
 
 
 
80
  elem_classes="note"
81
  )
82
 
 
88
  placeholder="Enter your prompt for image generation...",
89
  value="A serene mountain landscape with a crystal clear lake"
90
  )
91
+ run_button = gr.Button("🎨 Generate Image", variant="primary")
 
92
 
93
  result = gr.Image(label="Generated Image", show_label=True)
94
  status_text = gr.Textbox(label="Status", interactive=False)
 
101
  step=1,
102
  value=42,
103
  )
 
104
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
105
  diff_infer_steps = gr.Slider(
106
+ label="Inference Steps",
107
+ minimum=1,
108
  maximum=100,
109
+ step=1,
110
  value=50,
111
  )
 
112
  image_size = gr.Radio(
113
+ choices=["1024x1024"],
114
+ value="1024x1024",
115
+ label="Image Size"
116
  )
117
 
118
+ gr.Examples(
119
+ examples=examples,
120
+ inputs=[prompt],
 
 
121
  outputs=[result, seed, status_text],
122
+ fn=infer,
123
+ cache_examples=False,
124
  )
125
+
126
+ run_button.click(
127
+ fn=infer,
128
+ inputs=[prompt, seed, randomize_seed, diff_infer_steps, image_size],
129
+ outputs=[result, seed, status_text],
130
+ )
 
 
 
 
 
 
 
 
 
 
 
 
131
 
132
  if __name__ == "__main__":
133
+ demo.launch()