Taejin committed
Commit 1dd84ea · 1 Parent(s): f059506

Adding safetensor for HF transformer support

Signed-off-by: taejinp <[email protected]>

Files changed (3)
  1. config.json +79 -0
  2. model.safetensors +3 -0
  3. processor_config.json +16 -0
config.json ADDED
@@ -0,0 +1,79 @@
+{
+  "architectures": [
+    "SortformerOffline"
+  ],
+  "ats_weight": 0.5,
+  "dtype": "float32",
+  "fc_encoder_config": {
+    "activation_dropout": 0.1,
+    "attention_bias": true,
+    "attention_dropout": 0.1,
+    "conv_kernel_size": 9,
+    "dropout": 0.1,
+    "dropout_positions": 0.0,
+    "hidden_act": "silu",
+    "hidden_size": 512,
+    "initializer_range": 0.02,
+    "intermediate_size": 2048,
+    "layerdrop": 0.1,
+    "max_position_embeddings": 5000,
+    "model_type": "sortformer_fc_encoder",
+    "num_attention_heads": 8,
+    "num_hidden_layers": 18,
+    "num_key_value_heads": 8,
+    "num_mel_bins": 80,
+    "scale_input": true,
+    "subsampling_conv_channels": 256,
+    "subsampling_conv_kernel_size": 3,
+    "subsampling_conv_stride": 2,
+    "subsampling_factor": 8
+  },
+  "initializer_range": 0.02,
+  "model_type": "sortformer",
+  "modules_config": {
+    "causal_attn_rate": 0.5,
+    "causal_attn_rc": 30,
+    "chunk_left_context": 1,
+    "chunk_len": 188,
+    "chunk_right_context": 1,
+    "dropout_rate": 0.5,
+    "fc_d_model": 512,
+    "fifo_len": 0,
+    "max_index": 10000,
+    "min_pos_scores_rate": 0.5,
+    "model_type": "sortformer_modules",
+    "num_speakers": 4,
+    "pred_score_threshold": 1e-06,
+    "scores_add_rnd": 2.0,
+    "scores_boost_latest": 0.5,
+    "sil_threshold": 0.1,
+    "spkcache_len": 188,
+    "spkcache_sil_frames_per_spk": 5,
+    "spkcache_update_period": 188,
+    "strong_boost_rate": 0.3,
+    "subsampling_factor": 8,
+    "tf_d_model": 192,
+    "weak_boost_rate": 0.7
+  },
+  "num_speakers": 4,
+  "pil_weight": 0.5,
+  "tf_encoder_config": {
+    "activation_dropout": 0.5,
+    "activation_function": "relu",
+    "attention_dropout": 0.5,
+    "d_model": 192,
+    "dropout": 0.5,
+    "encoder_attention_heads": 8,
+    "encoder_ffn_dim": 768,
+    "encoder_layerdrop": 0.5,
+    "encoder_layers": 18,
+    "init_std": 0.02,
+    "initializer_range": 0.02,
+    "layer_norm_eps": 1e-05,
+    "max_source_positions": 1500,
+    "model_type": "sortformer_tf_encoder",
+    "num_mel_bins": 80,
+    "scale_embedding": false
+  },
+  "transformers_version": "5.0.0.dev0"
+}
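For context on the new config: it describes a Sortformer speaker-diarization model with three nested sub-configs, fc_encoder_config (likely a Fast-Conformer-style audio encoder: hidden_size 512, 18 layers, 8x subsampling), tf_encoder_config (a Transformer encoder: d_model 192, 18 layers), and modules_config (streaming and speaker-cache parameters for up to num_speakers = 4). A minimal loading sketch, not verified against this repo: it assumes a transformers build that registers the "sortformer" model type under the Auto classes (the config pins "transformers_version": "5.0.0.dev0"), and "./sortformer-checkpoint" is a placeholder for the downloaded repository.

from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("./sortformer-checkpoint")
print(config.model_type)    # "sortformer"
print(config.num_speakers)  # 4: one activity stream per tracked speaker

# Nested sub-configs are typically exposed as config objects in
# composite transformers configs.
print(config.fc_encoder_config.hidden_size)  # 512
print(config.tf_encoder_config.d_model)      # 192

# from_pretrained picks up the model.safetensors added in this commit.
model = AutoModel.from_pretrained("./sortformer-checkpoint")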
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8abcc5f3a82ff23134c98a37f70fef3f159611f394bb191a0ad0a6f4b052974
+size 494206256
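The file committed here is a Git LFS pointer, not the weights themselves: the repository stores only the SHA-256 object id and byte size, and the real file is fetched on download. The size of 494,206,256 bytes is consistent with roughly 123M float32 parameters, ignoring the small safetensors header. A sketch for verifying a downloaded copy against the pointer; "model.safetensors" below is the local path to the resolved file, not the 3-line pointer.

import hashlib
import os

EXPECTED_OID = "e8abcc5f3a82ff23134c98a37f70fef3f159611f394bb191a0ad0a6f4b052974"
EXPECTED_SIZE = 494206256

path = "model.safetensors"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)
assert digest.hexdigest() == EXPECTED_OID, "checksum mismatch"
print("model.safetensors matches its LFS pointer")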
processor_config.json ADDED
@@ -0,0 +1,16 @@
+{
+  "feature_extractor": {
+    "feature_extractor_type": "SortformerFeatureExtractor",
+    "feature_size": 80,
+    "hop_length": 160,
+    "n_fft": 512,
+    "padding_side": "right",
+    "padding_value": 0.0,
+    "preemphasis": 0.97,
+    "processor_class": "SortformerProcessor",
+    "return_attention_mask": true,
+    "sampling_rate": 16000,
+    "win_length": 400
+  },
+  "processor_class": "SortformerProcessor"
+}
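These are standard log-mel front-end settings: 16 kHz audio, 25 ms windows (win_length 400) with a 10 ms hop (hop_length 160), a 512-point FFT, 0.97 pre-emphasis, and 80 mel bins (feature_size), i.e. 100 feature frames per second. After the encoder's 8x subsampling in config.json, that is 12.5 frames per second, so the chunk_len and spkcache_len of 188 encoder frames each cover about 15 s of audio. Below is a rough re-implementation of this front end using torchaudio, for illustration only: the actual SortformerFeatureExtractor may differ in normalization, padding, and the exact mel scale.

import torch
import torchaudio

def log_mel_features(waveform: torch.Tensor) -> torch.Tensor:
    """waveform: (channels, samples) at 16 kHz -> (frames, 80) log-mel features."""
    # Pre-emphasis filter y[t] = x[t] - 0.97 * x[t-1], per "preemphasis": 0.97.
    emphasized = torch.cat(
        [waveform[:, :1], waveform[:, 1:] - 0.97 * waveform[:, :-1]], dim=1
    )
    mel = torchaudio.transforms.MelSpectrogram(
        sample_rate=16000,  # "sampling_rate"
        n_fft=512,          # "n_fft"
        win_length=400,     # "win_length" (25 ms)
        hop_length=160,     # "hop_length" (10 ms -> 100 frames/s)
        n_mels=80,          # "feature_size"
    )(emphasized)
    return torch.log(mel + 1e-9).squeeze(0).transpose(0, 1)

feats = log_mel_features(torch.randn(1, 16000))  # 1 s of dummy audio
print(feats.shape)  # roughly (101, 80)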