import gradio as gr
from haystack.nodes import TransformersImageToText
from haystack.nodes import PromptNode, PromptTemplate
from haystack import Pipeline
# Image captioning model: turns the uploaded photo into a short text description
image_to_text = TransformersImageToText(
    model_name_or_path="nlpconnect/vit-gpt2-image-captioning",
    use_gpu=True,
    batch_size=16,
    progress_bar=True
)
# Prompt that turns the generated description into an Instagram-style caption
prompt_template = PromptTemplate(prompt="""
You will receive a descriptive text of a photo.
Try to come up with a nice Instagram caption that has a phrase rhyming with the text.
Describing text: {documents};
Caption:
""")
# Alternative LLMs (both need an API key); the flan-t5 default below runs locally
# prompt_node = PromptNode(model_name_or_path="gpt-3.5-turbo", api_key=api_key, default_prompt_template=prompt_template)
prompt_node = PromptNode(model_name_or_path="google/flan-t5-large", default_prompt_template=prompt_template)
# prompt_node = PromptNode(model_name_or_path="tiiuae/falcon-7b-instruct", api_key=hf_api_key, default_prompt_template=prompt_template, model_kwargs={"trust_remote_code": True})
captioning_pipeline = Pipeline()
captioning_pipeline.add_node(component=image_to_text, name="image_to_text", inputs=["File"])
captioning_pipeline.add_node(component=prompt_node, name="prompt_node", inputs=["image_to_text"])
def generate_caption(image_file_path):
    # Pipeline.run expects a list of file paths, so wrap the single uploaded image
    caption = captioning_pipeline.run(file_paths=[image_file_path])
    return caption["results"][0]
with gr.Blocks(theme="soft") as demo:
    image = gr.Image(type="filepath")
    submit_btn = gr.Button("✨ Captionate ✨")
    caption = gr.Textbox(label="Caption")
    submit_btn.click(fn=generate_caption, inputs=[image], outputs=[caption])
if __name__ == "__main__":
    demo.launch()
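
To sanity-check the pipeline without launching the Gradio UI, you can call it directly from Python; the file name below is only a placeholder for an image on disk:

result = captioning_pipeline.run(file_paths=["example.jpg"])  # placeholder image path
print(result["results"][0])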