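"""Emoji prediction via sentence-embedding similarity.

Given a SentenceTransformer model and a JSON file of emoji descriptions,
EmojiPredictor returns the emoji whose description is closest (by cosine
similarity) to an input text.
"""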
import json
from sentence_transformers import SentenceTransformer, util

class EmojiPredictor:
    """Maps free-form text to the emoji whose description is most similar."""

    def __init__(self, model_path, emoji_data_path):
        self.model = SentenceTransformer(model_path)
        self.emoji_data = self._load_emoji_data(emoji_data_path)
        # Pre-compute embeddings for every description once, so predict()
        # only has to encode the incoming text.
        self.description_vectors = self._vectorize_descriptions()

    def _load_emoji_data(self, emoji_data_path):
        # Expects a JSON list of objects with 'description' and 'emoji' keys,
        # e.g. [{"emoji": "😀", "description": "grinning face"}, ...]
        with open(emoji_data_path, 'r', encoding='utf-8') as f:
            return json.load(f)

    def _vectorize_descriptions(self):
        # Get the sentence embedding for each description
        descriptions = [item['description'] for item in self.emoji_data]
        return self.model.encode(descriptions)

    def predict(self, text):
        # Embed the input text; encode() returns a single 1-D vector for a
        # single string.
        text_vector = self.model.encode(text)

        # Cosine similarity between the input and every description.
        # util.cos_sim handles the 1-D -> 2-D reshaping internally, so no
        # manual reshape or hand-rolled dot-product/norm arithmetic is needed.
        similarities = util.cos_sim(text_vector, self.description_vectors).flatten()

        # Return the emoji whose description is most similar to the input.
        most_similar_index = int(similarities.argmax())
        return self.emoji_data[most_similar_index]['emoji']
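
# Minimal usage sketch. The model id and data path below are assumptions for
# illustration, not values taken from this project; substitute your own
# SentenceTransformer checkpoint and emoji JSON file.
if __name__ == "__main__":
    predictor = EmojiPredictor(
        model_path="all-MiniLM-L6-v2",      # hypothetical model name
        emoji_data_path="emoji_data.json",  # hypothetical data file
    )
    print(predictor.predict("I just won the lottery!"))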