Upload 2 files
- app.py +7 -2
- requirements.txt +1 -1
app.py
CHANGED
@@ -13,6 +13,9 @@ model_id = "nvidia/canary-qwen-2.5b"
 print("Loading NVIDIA Canary-Qwen-2.5B model using NeMo...")
 model = SALM.from_pretrained(model_id)
 
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model = model.to(device)
+
 def generate_text(prompt, max_tokens=200, temperature=0.7, top_p=0.9):
     """Generate text using the NVIDIA NeMo model (LLM mode)"""
 
@@ -28,7 +31,8 @@ def generate_text(prompt, max_tokens=200, temperature=0.7, top_p=0.9):
         )
 
         # Convert IDs to text using model's tokenizer
-        response = model.tokenizer.ids_to_text(answer_ids[0].cpu())
+        # response = model.tokenizer.ids_to_text(answer_ids[0].cpu())
+        response = model.tokenizer.ids_to_text(answer_ids[0].to(device))
         return response
 
     except Exception as e:
@@ -50,7 +54,8 @@ def transcribe_audio(audio_file, user_prompt="Transcribe the following:"):
         )
 
         # Convert IDs to text
-        transcript = model.tokenizer.ids_to_text(answer_ids[0].cpu())
+        # transcript = model.tokenizer.ids_to_text(answer_ids[0].cpu())
+        transcript = model.tokenizer.ids_to_text(answer_ids[0].to(device))
         return transcript
 
     except Exception as e:
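Note: both decode changes rely on the module-level `device` set at load time and on NeMo's tokenizer accepting the ids as passed. A minimal sketch of the resulting pattern, for reference only; the import path and the `decode_answer` helper are assumptions, not part of this commit, and only `SALM.from_pretrained` and `tokenizer.ids_to_text` appear in the diff:

import torch
from nemo.collections.speechlm2.models import SALM  # import path assumed, not shown in the diff

model = SALM.from_pretrained("nvidia/canary-qwen-2.5b")

# Use the GPU when one is visible; fall back to CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

def decode_answer(answer_ids: torch.Tensor) -> str:
    # Hypothetical helper: tokenizer decoding runs on the host, so the
    # conservative form brings ids back to CPU as a plain list. The diff
    # instead passes a tensor on `device`, which assumes ids_to_text
    # tolerates GPU tensors.
    return model.tokenizer.ids_to_text(answer_ids[0].cpu().tolist())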
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-torch
+torch==2.6.0+cu118 --extra-index-url https://download.pytorch.org/whl/cu118
 gradio>=4.0.0
 nemo_toolkit[asr,tts] @ git+https://github.com/NVIDIA/NeMo.git
 accelerate>=0.20.0
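Note: pip documents `--extra-index-url` as a line-level option in requirements files, so whether the combined `torch` line above parses depends on the pip version the Space uses. An equivalent, arguably more portable layout splits the index URL onto its own line (assuming the Space installs with a plain `pip install -r requirements.txt`):

--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.6.0+cu118
gradio>=4.0.0
nemo_toolkit[asr,tts] @ git+https://github.com/NVIDIA/NeMo.git
accelerate>=0.20.0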