Commit d79204c
1 Parent(s): 06c667b
Update README.md
README.md CHANGED
@@ -32,14 +32,14 @@ The original model can be found under https://github.com/pytorch/fairseq/tree/ma
To transcribe audio files the model can be used as a standalone acoustic model as follows:

```python
-from transformers import Wav2Vec2Tokenizer,
+from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForCTC
from datasets import load_dataset
import soundfile as sf
import torch

# load model and tokenizer
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
-model =
+model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

# define function to read in sound file
def map_to_array(batch):
@@ -68,7 +68,7 @@ To transcribe audio files the model can be used as a standalone acoustic model a

```python
from datasets import load_dataset
-from transformers import
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
import soundfile as sf
import torch
from jiwer import wer
@@ -76,7 +76,7 @@ from jiwer import wer

librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")

-model =
+model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")

def map_to_array(batch):
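The diff only shows the start of the transcription snippet. Below is a minimal, self-contained sketch of how the completed example might run end to end, assuming a local 16 kHz mono WAV file at the hypothetical path "sample.wav" in place of the dataset loading and map_to_array steps that the README continues with:

```python
import soundfile as sf
import torch
from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForCTC

# load model and tokenizer (as in the lines the diff completes)
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

# read a 16 kHz mono waveform; "sample.wav" is a hypothetical local file
speech, sample_rate = sf.read("sample.wav")

# convert the raw waveform into model inputs
input_values = tokenizer(speech, return_tensors="pt").input_values

# forward pass and greedy (argmax) CTC decoding
with torch.no_grad():
    logits = model(input_values).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcription = tokenizer.batch_decode(predicted_ids)[0]
print(transcription)
```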
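The second snippet touched by the diff, the LibriSpeech WER evaluation, is likewise shown only up to its imports and model loading. The following is a hedged sketch of how the full evaluation might look, assuming the librispeech_asr test-clean split exposes "file" (audio path) and "text" (reference transcript) columns and using a hypothetical map_to_pred helper for inference; the .to("cuda") call mirrors the diff and can be dropped to run on CPU:

```python
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
import soundfile as sf
import torch
from jiwer import wer

librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")

model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")

# read each example's audio file into an array (the "file" column is an assumption)
def map_to_array(batch):
    speech, _ = sf.read(batch["file"])
    batch["speech"] = speech
    return batch

librispeech_eval = librispeech_eval.map(map_to_array)

# hypothetical helper: transcribe each example and store the prediction
def map_to_pred(batch):
    input_values = tokenizer(batch["speech"], return_tensors="pt", padding="longest").input_values
    with torch.no_grad():
        logits = model(input_values.to("cuda")).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    batch["transcription"] = tokenizer.batch_decode(predicted_ids)
    return batch

result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["speech"])

# word error rate against the reference transcripts (the "text" column is an assumption)
print("WER:", wer(result["text"], result["transcription"]))
```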