	Upload 13 files
README.md CHANGED

@@ -4,7 +4,7 @@ emoji: ππ
 colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version: 
+sdk_version: 5.0.1
 app_file: app.py
 short_description: Text-to-Image
 license: mit
    	
app.py CHANGED

@@ -22,96 +22,120 @@ css = """
 """

 with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css) as demo:
-    with gr.
-        with gr.
-            with gr.
-                with gr.
+    with gr.Tab("Image Generator"):
+        with gr.Row():
+            with gr.Column(scale=10):
+                with gr.Group():
+                    with gr.Accordion("Prompt from Image File", open=False):
+                        tagger_image = gr.Image(label="Input image", type="pil", format="png", sources=["upload", "clipboard"], height=256)
+                        with gr.Accordion(label="Advanced options", open=False):
+                            with gr.Row():
+                                tagger_general_threshold = gr.Slider(label="Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.01, interactive=True)
+                                tagger_character_threshold = gr.Slider(label="Character threshold", minimum=0.0, maximum=1.0, value=0.8, step=0.01, interactive=True)
+                                tagger_tag_type = gr.Radio(label="Convert tags to", info="danbooru for common, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru")
+                            with gr.Row():
+                                tagger_recom_prompt = gr.Radio(label="Insert reccomended prompt", choices=["None", "Animagine", "Pony"], value="None", interactive=True)
+                                tagger_keep_tags = gr.Radio(label="Remove tags leaving only the following", choices=["body", "dress", "all"], value="all")
+                        tagger_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use Florence-2-SD3-Long-Captioner"], label="Algorithms", value=["Use WD Tagger"])
+                        tagger_generate_from_image = gr.Button(value="Generate Tags from Image", variant="secondary")
+                    with gr.Accordion("Prompt Transformer", open=False):
                         with gr.Row():
-                            tagger_tag_type = gr.Radio(label="Convert tags to", info="danbooru for common, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru")
+                            v2_character = gr.Textbox(label="Character", placeholder="hatsune miku", scale=2)
+                            v2_series = gr.Textbox(label="Series", placeholder="vocaloid", scale=2)
                         with gr.Row():
-                    cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
-                    seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
-                    seed_rand = gr.Button("Randomize Seed π²", size="sm", variant="secondary")
-                recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
+                            v2_rating = gr.Radio(label="Rating", choices=list(V2_RATING_OPTIONS), value="sfw")
+                            v2_aspect_ratio = gr.Radio(label="Aspect ratio", info="The aspect ratio of the image.", choices=list(V2_ASPECT_RATIO_OPTIONS), value="square", visible=False)
+                            v2_length = gr.Radio(label="Length", info="The total length of the tags.", choices=list(V2_LENGTH_OPTIONS), value="long")
+                        with gr.Row():
+                            v2_identity = gr.Radio(label="Keep identity", info="How strictly to keep the identity of the character or subject. If you specify the detail of subject in the prompt, you should choose `strict`. Otherwise, choose `none` or `lax`. `none` is very creative but sometimes ignores the input prompt.", choices=list(V2_IDENTITY_OPTIONS), value="lax")
+                            v2_ban_tags = gr.Textbox(label="Ban tags", info="Tags to ban from the output.", placeholder="alternate costumen, ...", value="censored")
+                            v2_tag_type = gr.Radio(label="Tag Type", info="danbooru for common, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru", visible=False)
+                        v2_model = gr.Dropdown(label="Model", choices=list(V2_ALL_MODELS.keys()), value=list(V2_ALL_MODELS.keys())[0])
+                        v2_copy = gr.Button(value="Copy to clipboard", variant="secondary", size="sm", interactive=False)
+                        random_prompt = gr.Button(value="Extend π²", variant="secondary")
+                    prompt = gr.Text(label="Prompt", lines=2, max_lines=8, placeholder="1girl, solo, ...", show_copy_button=True)
+                    with gr.Accordion("Advanced options", open=False):
+                        neg_prompt = gr.Text(label="Negative Prompt", lines=1, max_lines=8, placeholder="")
+                        with gr.Row():
+                            width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                            height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                            steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
+                        with gr.Row():
+                            cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
+                            seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+                            seed_rand = gr.Button("Randomize Seed π²", size="sm", variant="secondary")
+                        recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
+                        with gr.Row():
+                            positive_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=get_positive_prefix(), value=[])
+                            positive_suffix = gr.CheckboxGroup(label="Use Positive Suffix", choices=get_positive_suffix(), value=["Common"])
+                            negative_prefix = gr.CheckboxGroup(label="Use Negative Prefix", choices=get_negative_prefix(), value=[])
+                            negative_suffix = gr.CheckboxGroup(label="Use Negative Suffix", choices=get_negative_suffix(), value=["Common"])
                     with gr.Row():
+                        image_num = gr.Slider(label="Number of images", minimum=1, maximum=max_images, value=1, step=1, interactive=True, scale=2)
+                        trans_prompt = gr.Button(value="Translate π", variant="secondary", size="sm", scale=2)
+                        clear_prompt = gr.Button(value="Clear ποΈ", variant="secondary", size="sm", scale=1)
+
                 with gr.Row():
+                    run_button = gr.Button("Generate Image", variant="primary", scale=6)
+                    random_button = gr.Button("Random Model π²", variant="secondary", scale=3)
+                    #stop_button = gr.Button('Stop', variant="stop", interactive=False, scale=1)
+                with gr.Group():
+                    model_name = gr.Dropdown(label="Select Model", choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0], allow_custom_value=True)
+                    model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]), elem_classes="model_info")
+            with gr.Column(scale=10):
+                with gr.Group():
+                    with gr.Row():
+                        output = [gr.Image(label='', elem_classes="output", type="filepath", format="png",
+                                show_download_button=True, show_share_button=False, show_label=False,
+                                interactive=False, min_width=80, visible=True, width=112, height=112) for _ in range(max_images)]
+                with gr.Group():
+                    results = gr.Gallery(label="Gallery", elem_classes="gallery", interactive=False, show_download_button=True, show_share_button=False,
+                                        container=True, format="png", object_fit="cover", columns=2, rows=2)
+                    image_files = gr.Files(label="Download", interactive=False)
+                    clear_results = gr.Button("Clear Gallery / Download ποΈ", variant="secondary")
+        with gr.Column():
+            examples = gr.Examples(
+                examples = [
+                    ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
+                    ["sailor moon, magical girl transformation, sparkles and ribbons, soft pastel colors, crescent moon motif, starry night sky background, shoujo manga style"],
+                    ["kafuu chino, 1girl, solo"],
+                    ["1girl"],
+                    ["beautiful sunset"],
+                ],
+                inputs=[prompt],
+                cache_examples=False,
+            )
+    with gr.Tab("PNG Info"):
+        def extract_exif_data(image):
+            if image is None: return ""
+            try:
+                metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
+                for key in metadata_keys:
+                    if key in image.info:
+                        return image.info[key]
+                return str(image.info)
+            except Exception as e:
+                return f"Error extracting metadata: {str(e)}"
+        with gr.Row():
+            with gr.Column():
+                image_metadata = gr.Image(label="Image with metadata", type="pil", sources=["upload"])
+            with gr.Column():
+                result_metadata = gr.Textbox(label="Metadata", show_label=True, show_copy_button=True, interactive=False, container=True, max_lines=99)
+
+                image_metadata.change(
+                    fn=extract_exif_data,
+                    inputs=[image_metadata],
+                    outputs=[result_metadata],
+                )
+    gr.Markdown(
+        f"""This demo was created in reference to the following demos.<br>
+[Nymbo/Flood](https://huggingface.co/spaces/Nymbo/Flood), 
+[Yntec/ToyWorldXL](https://huggingface.co/spaces/Yntec/ToyWorldXL), 
+[Yntec/Diffusion80XX](https://huggingface.co/spaces/Yntec/Diffusion80XX).
+            """
+    )
+    gr.DuplicateButton(value="Duplicate Space")
+    gr.Markdown(f"Just a few edits to *model.py* are all it takes to complete your own collection.")

     #gr.on(triggers=[run_button.click, prompt.submit, random_button.click], fn=lambda: gr.update(interactive=True), inputs=None, outputs=stop_button, show_api=False)
     model_name.change(change_model, [model_name], [model_info], queue=False, show_api=False)\

@@ -132,7 +156,7 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css) as demo:
         o.change(save_gallery, [o, results], [results, image_files], show_api=False)
         #stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event, gen_event2], show_api=False)

-    clear_prompt.click(lambda: None, None, [prompt], queue=False, show_api=False)
+    clear_prompt.click(lambda: (None, None, None, None), None, [prompt, neg_prompt, v2_character, v2_series], queue=False, show_api=False)
     clear_results.click(lambda: (None, None), None, [results, image_files], queue=False, show_api=False)
     recom_prompt_preset.change(set_recom_prompt_preset, [recom_prompt_preset],
      [positive_prefix, positive_suffix, negative_prefix, negative_suffix], queue=False, show_api=False)

@@ -157,5 +181,5 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css) as demo:
     ).success(insert_recom_prompt, [prompt, neg_prompt, tagger_recom_prompt], [prompt, neg_prompt], queue=False, show_api=False,
     ).success(compose_prompt_to_copy, [v2_character, v2_series, prompt], [prompt], queue=False, show_api=False)

-demo.queue(default_concurrency_limit=200, max_size=200)
+#demo.queue(default_concurrency_limit=200, max_size=200)
 demo.launch(max_threads=400)
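The new "PNG Info" tab works because Pillow exposes PNG text chunks, where Stable Diffusion front ends usually store generation parameters, through the `Image.info` dictionary, which is what `extract_exif_data` walks. A minimal standalone sketch of that lookup, outside Gradio; `sample.png` is a placeholder path and the key list mirrors the one in `app.py`:

```python
# Sketch of the metadata lookup behind the "PNG Info" tab.
# "sample.png" is a placeholder path; the key list mirrors app.py's extract_exif_data.
from PIL import Image

METADATA_KEYS = ["parameters", "metadata", "prompt", "Comment"]

def read_generation_metadata(path: str) -> str:
    """Return the first matching text chunk of a PNG, or the whole info dict as text."""
    try:
        with Image.open(path) as im:
            for key in METADATA_KEYS:
                if key in im.info:      # Pillow surfaces PNG tEXt/iTXt chunks via .info
                    return str(im.info[key])
            return str(im.info)         # nothing matched: show whatever is there
    except Exception as e:
        return f"Error extracting metadata: {e}"

if __name__ == "__main__":
    print(read_generation_metadata("sample.png"))
```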
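The reworked `clear_prompt.click` line also relies on a standard Gradio convention: when a callback returns a tuple, its elements are assigned in order to the listed output components, so a single click now blanks the prompt, negative prompt, character, and series boxes together. A self-contained sketch of that wiring, with component names copied from the diff and everything else kept minimal:

```python
# Sketch of the multi-output clear pattern used by clear_prompt.click in app.py:
# a tuple returned by the callback maps element-by-element onto the outputs list.
import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    neg_prompt = gr.Text(label="Negative Prompt")
    v2_character = gr.Textbox(label="Character")
    v2_series = gr.Textbox(label="Series")
    clear_prompt = gr.Button("Clear")
    # One handler resets all four fields; queue=False keeps it an immediate UI action.
    clear_prompt.click(lambda: (None, None, None, None), None,
                       [prompt, neg_prompt, v2_character, v2_series], queue=False)

if __name__ == "__main__":
    demo.launch()
```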
    	
model.py CHANGED

@@ -16,7 +16,7 @@ models = [
     'Meina/MeinaMix_V11',
     'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
     'KBlueLeaf/Kohaku-XL-Zeta',
-    'kayfahaarukku/UrangDiffusion-1.
+    'kayfahaarukku/UrangDiffusion-1.4',
     'Eugeoter/artiwaifu-diffusion-2.0',
     'Raelina/Rae-Diffusion-XL-V2',
     'Raelina/Raemu-XL-V4',
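The `model.py` hunk only updates the `kayfahaarukku/UrangDiffusion-1.4` entry, but it also shows what the closing note in `app.py` ("Just a few edits to *model.py* ...") refers to: the Space's model dropdown is driven by a plain list of Hugging Face repo IDs. A sketch of that list, restricted to the entries visible in this diff (the real file starts earlier and contains more):

```python
# Excerpt of the models list in model.py, limited to entries visible in this diff.
# Appending another Hugging Face model repo ID here is how the collection is extended.
models = [
    'Meina/MeinaMix_V11',
    'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
    'KBlueLeaf/Kohaku-XL-Zeta',
    'kayfahaarukku/UrangDiffusion-1.4',
    'Eugeoter/artiwaifu-diffusion-2.0',
    'Raelina/Rae-Diffusion-XL-V2',
    'Raelina/Raemu-XL-V4',
    # ... more repo IDs follow in the actual file ...
]
```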
    	
requirements.txt CHANGED

@@ -2,9 +2,8 @@ huggingface_hub
 torch==2.2.0
 torchvision
 accelerate
-transformers
+transformers==4.44.0
 optimum[onnxruntime]
-spaces
 dartrs
 translatepy
 timm
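`requirements.txt` now pins `transformers` to 4.44.0 and no longer lists `spaces`. If you want to confirm that a rebuilt environment matches the pin, a small stdlib-only check (a hypothetical helper, not part of the Space) is enough:

```python
# Optional sanity check that the environment matches the updated requirements.txt.
from importlib.metadata import PackageNotFoundError, version

print("transformers:", version("transformers"))  # expected to report 4.44.0

try:
    version("spaces")
    print("note: 'spaces' is still installed, though no longer listed as a requirement")
except PackageNotFoundError:
    print("'spaces' is not installed")
```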
