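# Streamlit front end for the DALL·E mini demo.
# The image-generation backend (minDALL-E sampling + CLIP re-ranking) is currently
# commented out below; only the UI scaffold is active.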
import os
import sys

import numpy as np
import streamlit as st
from PIL import Image

# import clip
# sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# import gradio as gr
# from dalle.models import Dalle
# from dalle.utils.utils import clip_score, set_seed

device = "cpu"

# model = Dalle.from_pretrained("minDALL-E/1.3B")  # This will automatically download the pretrained model.
# model.to(device=device)
# model_clip, preprocess_clip = clip.load("ViT-B/32", device=device)
# model_clip.to(device=device)
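# The helper below (disabled) would generate candidate images with minDALL-E,
# re-rank them with a CLIP score, and return them as PIL images.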
# def sample(prompt):
#     # Sampling
#     images = (
#         model.sampling(prompt=prompt, top_k=256, top_p=None, softmax_temperature=1.0, num_candidates=3, device=device)
#         .cpu()
#         .numpy()
#     )
#     images = np.transpose(images, (0, 2, 3, 1))
#     # CLIP Re-ranking
#     rank = clip_score(
#         prompt=prompt, images=images, model_clip=model_clip, preprocess_clip=preprocess_clip, device=device
#     )
#     # Save images
#     images = images[rank]
#     # print(rank, images.shape)
#     pil_images = []
#     for i in range(len(images)):
#         im = Image.fromarray((images[i] * 255).astype(np.uint8))
#         pil_images.append(im)
#     # im = Image.fromarray((images[0] * 255).astype(np.uint8))
#     return pil_images
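# Commented-out Gradio interface, apparently left over from an ImageGPT demo;
# the active UI below uses Streamlit instead.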
# title = "Interactive demo: ImageGPT"
# description = "Demo for OpenAI's ImageGPT: Generative Pretraining from Pixels. To use it, simply upload an image or use the example image below and click 'submit'. Results will show up in a few seconds."
# article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.10282'>ImageGPT: Generative Pretraining from Pixels</a> | <a href='https://openai.com/blog/image-gpt/'>Official blog</a></p>"
# iface = gr.Interface(
#     fn=sample,
#     inputs=[gr.inputs.Textbox(label="What would you like to see?")],
#     outputs=gr.outputs.Image(type="pil", label="Model input + completions"),
#     title=title,
#     description=description,
#     article=article,
#     # examples=examples,
#     enable_queue=True,
# )
# iface.launch(debug=True)
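# Active Streamlit UI starts here: inject the DALL·E mini logo into the sidebar.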
st.sidebar.markdown(
    """
<style>
.aligncenter {
    text-align: center;
}
</style>
<p class="aligncenter">
    <img src="https://raw.githubusercontent.com/borisdayma/dalle-mini/main/img/logo.png"/>
</p>
""",
    unsafe_allow_html=True,
)
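# Sidebar description with links to the GitHub repository and the W&B project report.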
st.sidebar.markdown(
    """
___
<p style='text-align: center'>
DALL·E mini is an AI model that generates images from any prompt you give!
</p>
<p style='text-align: center'>
Created by Boris Dayma et al. 2021
<br/>
<a href="https://github.com/borisdayma/dalle-mini" target="_blank">GitHub</a> | <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA" target="_blank">Project Report</a>
</p>
""",
    unsafe_allow_html=True,
)
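# Main page: title, subheader, and the text prompt input.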
st.header("DALL·E mini")
st.subheader("Generate images from text")

prompt = st.text_input("What do you want to see?")

DEBUG = False
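# The prediction flow below is disabled while the backend is offline. When enabled,
# it shows a "Generating predictions" notice, calls sample(prompt), lays the returned
# images out in a three-column grid, and offers an "Again!" button.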
# if prompt != "":
#     container = st.empty()
#     container.markdown(
#         f"""
#         <style> p {{ margin:0 }} div {{ margin:0 }} </style>
#         <div data-stale="false" class="element-container css-1e5imcs e1tzin5v1">
#             <div class="stAlert">
#                 <div role="alert" data-baseweb="notification" class="st-ae st-af st-ag st-ah st-ai st-aj st-ak st-g3 st-am st-b8 st-ao st-ap st-aq st-ar st-as st-at st-au st-av st-aw st-ax st-ay st-az st-b9 st-b1 st-b2 st-b3 st-b4 st-b5 st-b6">
#                     <div class="st-b7">
#                         <div class="css-whx05o e13vu3m50">
#                             <div data-testid="stMarkdownContainer" class="css-1ekf893 e16nr0p30">
#                                 <img src="https://raw.githubusercontent.com/borisdayma/dalle-mini/main/app/streamlit/img/loading.gif" width="30"/>
#                                 Generating predictions for: <b>{prompt}</b>
#                             </div>
#                         </div>
#                     </div>
#                 </div>
#             </div>
#         </div>
#         <small><i>Predictions may take up to 40s under high load. Please stand by.</i></small>
#         """,
#         unsafe_allow_html=True,
#     )
#     print(f"Getting selections: {prompt}")
#     selected = sample(prompt)
#
#     margin = 0.1  # for better position of zoom in arrow
#     n_columns = 3
#     cols = st.columns([1] + [margin, 1] * (n_columns - 1))
#     for i, img in enumerate(selected):
#         cols[(i % n_columns) * 2].image(img)
#
#     container.markdown(f"**{prompt}**")
#     st.button("Again!", key="again_button")
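# To run this app locally (assuming this file is saved as app.py):
#     streamlit run app.py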