	Update app.py
    	
app.py CHANGED
```diff
@@ -346,7 +346,7 @@ numpy==1.26.4'''
     # The subprocess call for autotrain spacerunner
     api = HfApi(token=token)
     username = api.whoami()["name"]
-    subprocess_command = ["autotrain", "spacerunner", "--project-name", slugged_lora_name, "--script-path", spacerunner_folder, "--username", username, "--token", token, "--backend", "spaces-
+    subprocess_command = ["autotrain", "spacerunner", "--project-name", slugged_lora_name, "--script-path", spacerunner_folder, "--username", username, "--token", token, "--backend", "spaces-l40sx1", "--env",f"HF_TOKEN={token};HF_HUB_ENABLE_HF_TRANSFER=1", "--args", spacerunner_args]
     outcome = subprocess.run(subprocess_command)
     if(outcome.returncode == 0):
         return f"""# Your training has started.
@@ -358,13 +358,13 @@ numpy==1.26.4'''
 
 def calculate_price(iterations, with_prior_preservation):
     if(with_prior_preservation):
-        seconds_per_iteration = 
+        seconds_per_iteration = 1.48
     else:
-        seconds_per_iteration = 
+        seconds_per_iteration = 1.25
     total_seconds = (iterations * seconds_per_iteration) + 210
-    cost_per_second = 1.
+    cost_per_second = 1.80/60/60
     cost = round(cost_per_second * total_seconds, 2)
-    return f'''To train this LoRA, we will duplicate the space and hook an 
+    return f'''To train this LoRA, we will duplicate the space and hook an L40S GPU under the hood.
 ## Estimated to cost <b>< US$ {str(cost)}</b> for {round(int(total_seconds)/60, 2)} minutes with your current train settings <small>({int(iterations)} iterations at {seconds_per_iteration}s/it)</small>
 #### ↓ to continue, grab you <b>write</b> token [here](https://huggingface.co/settings/tokens) and enter it below ↓'''
```
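For reference, here is a self-contained sketch of what the updated call does, with placeholder values standing in for what the app collects from its UI (the token, project name, folder, and arguments below are illustrative, not from the commit):

```python
import subprocess

from huggingface_hub import HfApi

# Placeholder inputs -- in the app these come from the Gradio UI, not hard-coded values.
token = "hf_..."                      # a *write* token from https://huggingface.co/settings/tokens
slugged_lora_name = "my-lora"         # hypothetical project name
spacerunner_folder = "spacerunner"    # hypothetical folder holding the training script
spacerunner_args = ""                 # hypothetical training-script arguments

api = HfApi(token=token)
username = api.whoami()["name"]

# Same command as in the diff: run the training script on a Space backed by an
# L40S GPU, forwarding the token and enabling hf_transfer for faster transfers.
subprocess_command = [
    "autotrain", "spacerunner",
    "--project-name", slugged_lora_name,
    "--script-path", spacerunner_folder,
    "--username", username,
    "--token", token,
    "--backend", "spaces-l40sx1",
    "--env", f"HF_TOKEN={token};HF_HUB_ENABLE_HF_TRANSFER=1",
    "--args", spacerunner_args,
]
outcome = subprocess.run(subprocess_command)
if outcome.returncode == 0:
    print("Training job submitted.")
```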

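The second part of the change fills in the pricing constants for the L40S backend. Below is a minimal standalone sketch of the same arithmetic, applied to a hypothetical 1000-iteration run without prior preservation (the function name `estimate_cost` and the example values are illustrative, not from the commit):

```python
# Reproduces the updated estimate: per-iteration time, a fixed 210 s overhead,
# and an hourly rate of US$ 1.80 converted to a per-second cost.
def estimate_cost(iterations, with_prior_preservation):
    seconds_per_iteration = 1.48 if with_prior_preservation else 1.25
    total_seconds = iterations * seconds_per_iteration + 210
    cost_per_second = 1.80 / 60 / 60
    return round(cost_per_second * total_seconds, 2), total_seconds

cost, total_seconds = estimate_cost(1000, with_prior_preservation=False)
print(f"~US$ {cost} for {total_seconds / 60:.2f} minutes")  # ~US$ 0.73 for 24.33 minutes
```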