Upload 3 files

- README.md +4 -7
- app.py +29 -25
- externalmod.py +27 -0
README.md CHANGED

@@ -1,16 +1,13 @@
 ---
-title: …
+title: Huggingface Diffusion
 emoji: 🛕🛕
 colorFrom: green
 colorTo: blue
 sdk: gradio
-sdk_version: …
+sdk_version: 5.0.1
 app_file: app.py
-pinned: …
-…
-- Yntec/Diffusion80XX
-- Yntec/HuggingfaceDiffusion
-short_description: Compare up to 6 image models!
+pinned: true
+short_description: Compare 909+ AI Art Models 6 at a time!
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
    	
app.py CHANGED

@@ -1,7 +1,6 @@
 import gradio as gr
-from random import randint
 from all_models import models
-from externalmod import gr_Interface_load
+from externalmod import gr_Interface_load, save_image, randomize_seed
 import asyncio
 import os
 from threading import RLock
@@ -49,21 +48,16 @@ def random_choices():
 
 # https://huggingface.co/docs/api-inference/detailed_parameters
 # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
-async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, …
-    from pathlib import Path
+async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
     kwargs = {}
-    if height …
-    if width …
-    if steps …
-    if cfg …
-    …
-    …
-    else:
-        rand = randint(1, 500)
-        for i in range(rand):
-            noise += " "
     task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
-                               prompt=…
+    if height > 0: kwargs["height"] = height
+    if width > 0: kwargs["width"] = width
+    if steps > 0: kwargs["num_inference_steps"] = steps
+    if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
+    if seed == -1: kwargs["seed"] = randomize_seed()
+    else: kwargs["seed"] = seed
+    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
+                               prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
     await asyncio.sleep(0)
     try:
         result = await asyncio.wait_for(task, timeout=timeout)
@@ -72,22 +66,21 @@ async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=No
         print(f"Task timed out: {model_str}")
         if not task.done(): task.cancel()
         result = None
-        raise Exception(f"Task timed out: {model_str}")
+        raise Exception(f"Task timed out: {model_str}") from e
     except Exception as e:
         print(e)
         if not task.done(): task.cancel()
         result = None
-        raise Exception(e…
+        raise Exception() from e
     if task.done() and result is not None and not isinstance(result, tuple):
         with lock:
             png_path = "image.png"
-            result…
-            image = str(Path(png_path).resolve())
+            image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, seed)
         return image
     return None
 
 
-def gen_fn(model_str, prompt, nprompt="", height=…
+def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
     try:
         loop = asyncio.new_event_loop()
         result = loop.run_until_complete(infer(model_str, prompt, nprompt,
@@ -117,8 +110,15 @@ CSS="""
 """
 
 
-with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
-    …
+with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=CSS) as demo:
+    gr.HTML(
+    """
+        <div>
+        <p> <center>For simultaneous generations without hidden queue check out <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>! For more options like single model x6 check out <a href="https://huggingface.co/spaces/John6666/Diffusion80XX4sg">Diffusion80XX4sg</a> by John6666!</center>
+        </p></div>
+    """
+)
+    with gr.Tab('Huggingface Diffusion'):
         with gr.Column(scale=2):
             with gr.Group():
                 txt_input = gr.Textbox(label='Your prompt:', lines=4)
@@ -131,6 +131,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
                         steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                         cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                         seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+                        seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
+                        seed_rand.click(randomize_seed, None, [seed], queue=False)
             with gr.Row():
                 gen_button = gr.Button(f'Generate up to {int(num_models)} images in up to 3 minutes total', variant='primary', scale=3)
                 random_button = gr.Button(f'Random {int(num_models)} 🎲', variant='secondary', scale=1)
@@ -142,7 +144,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
             with gr.Group():
                 with gr.Row():
                     output = [gr.Image(label=m, show_download_button=True, elem_classes="output",
-                              interactive=False, …
+                              interactive=False, width=112, height=112, show_share_button=False, format="png",
                               visible=True) for m in default_models]
                     current_models = [gr.Textbox(m, visible=False) for m in default_models]
 
@@ -179,6 +181,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
                         steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                         cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                         seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+                        seed_rand2 = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
+                        seed_rand2.click(randomize_seed, None, [seed2], queue=False)
             num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
             with gr.Row():
                 gen_button2 = gr.Button('Generate', variant='primary', scale=2)
@@ -189,7 +193,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
             with gr.Group():
                 with gr.Row():
                     output2 = [gr.Image(label='', show_download_button=True, elem_classes="output",
-                               interactive=False, …
+                               interactive=False, width=112, height=112, visible=True, format="png",
                                show_share_button=False, show_label=False) for _ in range(max_images)]
 
         with gr.Column(scale=2):
@@ -210,6 +214,6 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
 
     gr.Markdown("Based on the [TestGen](https://huggingface.co/spaces/derwahnsinn/TestGen) Space by derwahnsinn, the [SpacIO](https://huggingface.co/spaces/RdnUser77/SpacIO_v1) Space by RdnUser77 and Omnibus's Maximum Multiplier!")
 
-demo.queue(default_concurrency_limit=200, max_size=200)
+#demo.queue(default_concurrency_limit=200, max_size=200)
 demo.launch(show_api=False, max_threads=400)
 # https://github.com/gradio-app/gradio/issues/6339
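Taken together, the app.py changes replace the old prompt-noise hack (appending a random number of spaces via randint) with explicit sentinel defaults: 0 means "use the model default" for height/width/steps/cfg, and -1 means "randomize the seed". The sketch below illustrates that pattern in isolation; it is a minimal approximation, and fake_infer / gen_fn_sketch are hypothetical stand-ins for the Space's real infer() and gen_fn().

```python
import asyncio

async def fake_infer(prompt, **kwargs):
    # Hypothetical stand-in for infer(): yield control once (as the real
    # code does via asyncio.sleep(0)), then pretend to generate an image.
    await asyncio.sleep(0)
    return f"image({prompt!r}, {kwargs})"

def gen_fn_sketch(prompt, height=0, width=0, steps=0, cfg=0, seed=-1):
    # 0 / -1 act as "use the default" sentinels, mirroring the kwargs
    # filtering in the new infer() above.
    kwargs = {}
    if height > 0: kwargs["height"] = height
    if width > 0: kwargs["width"] = width
    if steps > 0: kwargs["num_inference_steps"] = steps
    if cfg > 0: kwargs["guidance_scale"] = cfg
    if seed != -1: kwargs["seed"] = seed
    # Like gen_fn(), drive the coroutine on a fresh event loop so the
    # function stays synchronous for Gradio's callback machinery.
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(fake_infer(prompt, **kwargs))
    finally:
        loop.close()

print(gen_fn_sketch("a lighthouse at dusk", steps=20, seed=42))
```

The same randomize_seed() helper also drives the new "Randomize Seed 🎲" buttons, wired directly to the seed sliders with seed_rand.click(randomize_seed, None, [seed], queue=False).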
    	
externalmod.py CHANGED

@@ -583,3 +583,30 @@ def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="l
             models.append(model.id)
             if len(models) == limit: break
     return models
+
+
+def save_image(image, savefile, modelname, prompt, nprompt, height=0, width=0, steps=0, cfg=0, seed=-1):
+    from PIL import Image, PngImagePlugin
+    import json
+    try:
+        metadata = {"prompt": prompt, "negative_prompt": nprompt, "Model": {"Model": modelname.split("/")[-1]}}
+        if steps > 0: metadata["num_inference_steps"] = steps
+        if cfg > 0: metadata["guidance_scale"] = cfg
+        if seed != -1: metadata["seed"] = seed
+        if width > 0 and height > 0: metadata["resolution"] = f"{width} x {height}"
+        metadata_str = json.dumps(metadata)
+        info = PngImagePlugin.PngInfo()
+        info.add_text("metadata", metadata_str)
+        image.save(savefile, "PNG", pnginfo=info)
+        return str(Path(savefile).resolve())
+    except Exception as e:
+        print(f"Failed to save image file: {e}")
+        raise Exception(f"Failed to save image file:") from e
+
+
+def randomize_seed():
+    from random import seed, randint
+    MAX_SEED = 2**32-1
+    seed()
+    rseed = randint(0, MAX_SEED)
+    return rseed
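The new save_image() embeds the generation parameters as a JSON string in a PNG tEXt chunk keyed "metadata" (it uses Path, presumably imported at module level elsewhere in externalmod.py). Below is a small sketch of reading those parameters back with Pillow; read_image_metadata is a hypothetical helper, not part of this commit.

```python
import json
from PIL import Image

def read_image_metadata(path):
    # Text chunks written via PngImagePlugin.PngInfo.add_text() are
    # exposed in the .info dict when the PNG is reopened.
    with Image.open(path) as im:
        raw = im.info.get("metadata")
    return json.loads(raw) if raw else None

meta = read_image_metadata("image.png")
if meta:
    print(meta.get("prompt"), meta.get("seed"), meta.get("resolution"))
```

Pillow surfaces tEXt chunks through the image's .info mapping, so nothing beyond json.loads is needed to recover the stored prompt, seed, and resolution.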

