Commit f87de06 (1 parent: 6b49c52)
Add form validation
app.py (CHANGED)

@@ -14,7 +14,7 @@ import tarfile
 import urllib.parse
 import gc
 from diffusers import StableDiffusionPipeline
-from huggingface_hub import snapshot_download
+from huggingface_hub import snapshot_download, update_repo_visibility, HfApi
 
 
 is_spaces = True if "SPACE_ID" in os.environ else False
@@ -130,11 +130,33 @@ def pad_image(image):
         new_image.paste(image, ((h - w) // 2, 0))
         return new_image
 
+def validate_model_upload(hf_token, model_name):
+    if(hf_token != ''):
+        api = HfApi()
+        try:
+            _ = api.whoami(hf_token)
+        except:
+            raise gr.Error("You have inserted an invalid Hugging Face token")
+        try:
+            update_repo_visibility(repo_id=os.environ['SPACE_ID'], private=True, token=hf_token, repo_type="space")
+        except:
+            raise gr.Error("Oops, you created a Hugging Face token with read permissions only. You need one with write permissions")
+    else:
+        raise gr.Error("Please insert a Hugging Face Token (make sure to create it with write permissions)")
+    if(model_name == ""):
+        raise gr.Error("Please fill in your model's name")
+
 def train(*inputs):
     if is_shared_ui:
         raise gr.Error("This Space only works in duplicated instances")
     if not is_gpu_associated:
         raise gr.Error("Please associate a T4 GPU for this Space")
+    hf_token = inputs[-5]
+    model_name = inputs[-7]
+    remove_attribution_after = inputs[-6]
+    if(remove_attribution_after):
+        validate_model_upload(hf_token, model_name)
+
     torch.cuda.empty_cache()
     if 'pipe' in globals():
         global pipe, pipe_is_set
@@ -170,7 +192,6 @@ def train(*inputs):
     os.makedirs('output_model',exist_ok=True)
     uses_custom = inputs[-1]
     type_of_thing = inputs[-4]
-    remove_attribution_after = inputs[-6]
     experimental_face_improvement = inputs[-9]
 
     if(uses_custom):
@@ -276,8 +297,6 @@ def train(*inputs):
             gr.update(visible=True) #completed_training
         ]
     else:
-        hf_token = inputs[-5]
-        model_name = inputs[-7]
         where_to_upload = inputs[-8]
         push(model_name, where_to_upload, hf_token, which_model, True)
         hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
@@ -300,6 +319,7 @@ def generate(prompt, steps):
     return(image)
 
 def push(model_name, where_to_upload, hf_token, which_model, comes_from_automated=False):
+    validate_model_upload(hf_token, model_name)
     if(not os.path.exists("model.ckpt")):
         convert("output_model", "model.ckpt")
     from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
@@ -313,7 +333,8 @@ def push(model_name, where_to_upload, hf_token, which_model, comes_from_automate
     model_id = f"sd-dreambooth-library/{model_name_slug}"
     headers = {"Authorization" : f"Bearer: {hf_token}", "Content-Type": "application/json"}
     response = requests.post("https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", headers=headers)
-
+
+    print(f"Starting to upload the model {model_id}...")
     images_upload = os.listdir("instance_images")
     image_string = ""
     instance_prompt_list = []
@@ -384,7 +405,7 @@ Sample pictures of:
     else:
         extra_message = "The GPU has been removed automatically as requested, and you can try the model via the model page"
     api.create_discussion(repo_id=os.environ['SPACE_ID'], title=f"Your model {model_name} has finished trained from the Dreambooth Train Spaces!", description=f"Your model has been successfully uploaded to: https://huggingface.co/{model_id}. {extra_message}",repo_type="space", token=hf_token)
-
+    print("Model uploaded successfully!")
     return [gr.update(visible=True, value=f"Successfully uploaded your model. Access it [here](https://huggingface.co/{model_id})"), gr.update(visible=True, value=["diffusers_model.tar", "model.ckpt"])]
 
 def convert_to_ckpt():
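
Aside on the validation added by this commit: validate_model_upload checks the token in two steps, first calling HfApi.whoami to confirm the token is valid at all, then attempting a write operation (update_repo_visibility on the Space itself) to confirm it was created with write scope. A minimal standalone sketch of the same check, runnable outside Gradio (huggingface_hub must be installed; HF_TOKEN and SPACE_ID below are hypothetical environment variables, and ValueError stands in for gr.Error):

import os
from huggingface_hub import HfApi, update_repo_visibility

def check_write_token(hf_token: str, repo_id: str) -> None:
    """Raise ValueError unless hf_token is valid and has write access to repo_id."""
    if not hf_token:
        raise ValueError("Please insert a Hugging Face token (created with write permissions)")
    api = HfApi()
    try:
        api.whoami(hf_token)  # succeeds for any valid token, read-only or write
    except Exception:
        raise ValueError("You have inserted an invalid Hugging Face token")
    try:
        # A write operation: a read-only token cannot change repo visibility.
        update_repo_visibility(repo_id=repo_id, private=True, token=hf_token, repo_type="space")
    except Exception:
        raise ValueError("The token has read permissions only; create one with write permissions")

if __name__ == "__main__":
    # Hypothetical usage: export HF_TOKEN and SPACE_ID before running.
    check_write_token(os.environ.get("HF_TOKEN", ""), os.environ.get("SPACE_ID", ""))

Note that, as in the commit's helper, the write probe also flips the Space to private as a side effect; a probe with no side effects would need a different write call.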