Luigi committed on
Commit c066678 · 1 Parent(s): 1b9d615

Update requirements.txt with spaces package and temporarily remove GPU decorator

Files changed (2):
  1. app.py +3 -5
  2. requirements.txt +1 -0
app.py CHANGED
@@ -9,7 +9,6 @@ import tempfile
 import gradio as gr
 import torch
 from pathlib import Path
-from spaces import GPU
 
 # Add current directory to Python path for local zipvoice package
 sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
@@ -34,8 +33,8 @@ def load_models_and_components(model_name: str):
     """Load and cache models, tokenizer, vocoder, and feature extractor."""
     global _models_cache, _tokenizer_cache, _vocoder_cache, _feature_extractor_cache
 
-    # Set device (GPU if available, otherwise CPU)
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    # Set device (CPU for Spaces)
+    device = torch.device("cpu")
 
     if model_name not in _models_cache:
         print(f"Loading {model_name} model...")
@@ -101,7 +100,6 @@ def load_models_and_components(model_name: str):
         model_config["feature"]["sampling_rate"])
 
 
-@GPU
 def synthesize_speech_gradio(
     text: str,
     prompt_audio_file,
@@ -126,7 +124,7 @@ def synthesize_speech_gradio(
     # Load models and components
     model, tokenizer, vocoder, feature_extractor, sampling_rate = load_models_and_components(model_name)
 
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    device = torch.device("cpu")
 
     # Save uploaded audio to temporary file
     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio:
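
The commit message describes the removal of the GPU decorator as temporary. For reference, here is a minimal, hedged sketch of how the decorator from the deleted lines is typically re-applied on a Hugging Face ZeroGPU Space; the import and function name come from the diff above, but the trimmed function body shown here is only illustrative, not the actual app.py implementation.

# Illustrative sketch only (not part of this commit): restoring the removed decorator.
import torch
from spaces import GPU

@GPU  # request a GPU allocation for the duration of each call on a ZeroGPU Space
def synthesize_speech_gradio(text: str, prompt_audio_file, model_name: str):
    # With a GPU granted, device selection can return to the removed CUDA-or-CPU logic.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    ...  # model loading and synthesis continue as in app.py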
requirements.txt CHANGED
@@ -10,6 +10,7 @@ tensorboard
 vocos
 pydub
 gradio>=4.44.0
+spaces
 
 # Normalization
 cn2an