Make model compatible with Transformers v4.49.0 (#40)
Browse files- Make model compatible with Transformers v4.49.0 (ae440694f5af374760e184d6c45b5a8ec4f67301)
Co-authored-by: Harry Mellor <[email protected]>
- modeling_minicpmo.py +3 -3
modeling_minicpmo.py
CHANGED
|
@@ -42,7 +42,7 @@ from transformers import AutoProcessor
|
|
| 42 |
from transformers import BertTokenizerFast
|
| 43 |
from transformers import LlamaConfig
|
| 44 |
from transformers import LlamaModel
|
| 45 |
-
from transformers import LogitsWarper
|
| 46 |
from transformers import PreTrainedModel
|
| 47 |
from transformers import Qwen2ForCausalLM
|
| 48 |
from transformers import Qwen2PreTrainedModel
|
|
@@ -2919,7 +2919,7 @@ class ConditionalChatTTS(PreTrainedModel):
|
|
| 2919 |
force_no_stop=False,
|
| 2920 |
min_new_token=10,
|
| 2921 |
max_new_token=50,
|
| 2922 |
-
logits_warpers: List[LogitsWarper] = [],
|
| 2923 |
logits_processors: List[CustomRepetitionPenaltyLogitsProcessorRepeat] = [],
|
| 2924 |
show_tqdm=False,
|
| 2925 |
):
|
|
@@ -2937,7 +2937,7 @@ class ConditionalChatTTS(PreTrainedModel):
|
|
| 2937 |
eos_token (Union[int, torch.Tensor]): End of sequence token.
|
| 2938 |
streaming_tts_text_mask (Optional[torch.Tensor], optional): Mask for streaming TTS text. Defaults to None.
|
| 2939 |
max_new_token (int, optional): Maximum number of new tokens to generate. Defaults to 50.
|
| 2940 |
-
logits_warpers (List[LogitsWarper], optional): List of logits warpers. Defaults to [].
|
| 2941 |
logits_processors (List[CustomRepetitionPenaltyLogitsProcessorRepeat], optional): List of logits processors. Defaults to [].
|
| 2942 |
show_tqdm (bool, optional): Whether to show progress bar. Defaults to True.
|
| 2943 |
|
|
|
|
| 42 |
from transformers import BertTokenizerFast
|
| 43 |
from transformers import LlamaConfig
|
| 44 |
from transformers import LlamaModel
|
| 45 |
+
from transformers import LogitsProcessor
|
| 46 |
from transformers import PreTrainedModel
|
| 47 |
from transformers import Qwen2ForCausalLM
|
| 48 |
from transformers import Qwen2PreTrainedModel
|
|
|
|
| 2919 |
force_no_stop=False,
|
| 2920 |
min_new_token=10,
|
| 2921 |
max_new_token=50,
|
| 2922 |
+
logits_warpers: List[LogitsProcessor] = [],
|
| 2923 |
logits_processors: List[CustomRepetitionPenaltyLogitsProcessorRepeat] = [],
|
| 2924 |
show_tqdm=False,
|
| 2925 |
):
|
|
|
|
| 2937 |
eos_token (Union[int, torch.Tensor]): End of sequence token.
|
| 2938 |
streaming_tts_text_mask (Optional[torch.Tensor], optional): Mask for streaming TTS text. Defaults to None.
|
| 2939 |
max_new_token (int, optional): Maximum number of new tokens to generate. Defaults to 50.
|
| 2940 |
+
logits_warpers (List[LogitsProcessor], optional): List of logits processors. Defaults to [].
|
| 2941 |
logits_processors (List[CustomRepetitionPenaltyLogitsProcessorRepeat], optional): List of logits processors. Defaults to [].
|
| 2942 |
show_tqdm (bool, optional): Whether to show progress bar. Defaults to True.
|
| 2943 |
|