KingNish committed on
Commit
4285f81
·
1 Parent(s): 4135261

Changes to be committed:

Browse files
Files changed (1) hide show
  1. app.py +7 -14
app.py CHANGED
@@ -1,13 +1,6 @@
1
  import gradio as gr
2
  from gradio_client import Client, handle_file
3
 
4
- def set_client_for_session(request: gr.Request):
5
- x_ip_token = request.headers['x-ip-token']
6
- print(x_ip_token)
7
- flux_client = Client("black-forest-labs/FLUX.1-Kontext-Dev", headers={"x-ip-token": x_ip_token})
8
- ltx_client = Client("KingNish/ltx-video-distilled", headers={"x-ip-token": x_ip_token})
9
- return flux_client, ltx_client
10
-
11
  def image_to_image(prompt, image, flux_client):
12
  prompt = "You are an AI image-to-image editor that transforms a user-provided input image based on their prompt while maintaining consistency and fidelity to the original image. Generate an output image that accurately reflects the user's requested modifications, preserving key elements like style, composition, and context from the input image unless explicitly instructed otherwise. " + prompt
13
  image = flux_client.predict(
@@ -34,22 +27,24 @@ def image_to_video(prompt, image, ltx_client):
34
  print(result)
35
  return result
36
 
37
- def personalized_video(prompt, image, flux_client, ltx_client):
 
 
 
 
38
  image = image_to_image(prompt, image, flux_client)
39
  yield image, None
40
  video = image_to_video(prompt, image, ltx_client)
41
  yield image, video
42
 
43
  with gr.Blocks() as demo:
44
- flux_client = gr.State(None)
45
- ltx_client = gr.State(None)
46
  input_image = gr.Image(type="filepath")
47
  prompt = gr.Textbox(max_lines=1)
48
  edited_image = gr.Image()
49
  output_video = gr.Video()
50
  submit_button = gr.Button("Submit")
51
- submit_button.click(personalized_video, [prompt, input_image, flux_client, ltx_client], [edited_image, output_video])
52
- prompt.submit(personalized_video, [prompt, input_image, flux_client, ltx_client], [edited_image, output_video])
53
 
54
  # gr.Examples(
55
  # examples=[
@@ -62,7 +57,5 @@ with gr.Blocks() as demo:
62
  # cache_mode = "eager"
63
  # )
64
 
65
- demo.load(set_client_for_session, None, [flux_client, ltx_client])
66
-
67
  demo.launch()
68
 
 
1
  import gradio as gr
2
  from gradio_client import Client, handle_file
3
 
 
 
 
 
 
 
 
4
  def image_to_image(prompt, image, flux_client):
5
  prompt = "You are an AI image-to-image editor that transforms a user-provided input image based on their prompt while maintaining consistency and fidelity to the original image. Generate an output image that accurately reflects the user's requested modifications, preserving key elements like style, composition, and context from the input image unless explicitly instructed otherwise. " + prompt
6
  image = flux_client.predict(
 
27
  print(result)
28
  return result
29
 
30
def personalized_video(prompt, image, flux_client=None, ltx_client=None, request: gr.Request = None):
    """Edit *image* according to *prompt*, then animate the edited frame into a video.

    A generator wired to both the Submit button and the prompt textbox; it
    yields twice so the UI can show intermediate progress:
      1. ``(edited_image, None)``  - as soon as the FLUX image edit finishes
      2. ``(edited_image, video)`` - once the LTX video is ready

    ``flux_client`` / ``ltx_client`` default to ``None`` because the Gradio
    event handlers pass only ``[prompt, input_image]`` as inputs (the
    ``gr.Request``-annotated parameter is injected by Gradio, not listed as an
    input); without defaults every call would fail with a missing-argument
    error. Both clients are always rebuilt here with the caller's
    ``x-ip-token`` header — presumably so downstream Spaces attribute usage to
    the end user rather than this Space (TODO confirm against ZeroGPU docs).
    """
    x_ip_token = request.headers['x-ip-token']
    flux_client = Client("black-forest-labs/FLUX.1-Kontext-Dev", headers={"x-ip-token": x_ip_token})
    ltx_client = Client("KingNish/ltx-video-distilled", headers={"x-ip-token": x_ip_token})
    print(x_ip_token)
    image = image_to_image(prompt, image, flux_client)
    yield image, None
    video = image_to_video(prompt, image, ltx_client)
    yield image, video
39
 
40
with gr.Blocks() as demo:
    # Inputs: a file-path image and a one-line prompt.
    source_image = gr.Image(type="filepath")
    user_prompt = gr.Textbox(max_lines=1)
    # Outputs: the FLUX-edited frame, then the LTX-generated clip.
    result_image = gr.Image()
    result_video = gr.Video()
    run_button = gr.Button("Submit")

    # Button click and Enter in the textbox trigger the same pipeline,
    # so the wiring is declared once and registered on both events.
    pipeline_wiring = (
        personalized_video,
        [user_prompt, source_image],
        [result_image, result_video],
    )
    run_button.click(*pipeline_wiring)
    user_prompt.submit(*pipeline_wiring)

    # NOTE(review): an example gallery (gr.Examples with cache_mode="eager")
    # is currently disabled here.

demo.launch()
61