Datasets: coderchen01/MMSD2.0
Languages:
English
Size:
10K - 100K
Tags:
sarcasm
sarcasm-detection
multimodal-sarcasm-detection
sarcasm detection
multimodal sarcasm detection
tweets
Update README.md
README.md CHANGED
@@ -116,19 +116,21 @@ This is a copy of the dataset uploaded on Hugging Face for easy access. The orig
 ```python
 # usage
 from datasets import load_dataset
-from transformers import CLIPTokenizer
+from transformers import CLIPProcessor
 from torch.utils.data import DataLoader
 
-
-tokenizer = CLIPTokenizer.from_pretrained(clip_path)
+processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
 def tokenization(example):
-
-
-
-
-
-
+    inputs = processor(
+        text=example["text"], images=example["image"], return_tensors="pt"
+    )
+    return {
+        "pixel_values": inputs["pixel_values"],
+        "input_ids": inputs["input_ids"],
+        "attention_mask": inputs["attention_mask"],
+        "label": example["label"],
+    }
 
 dataset = load_dataset('coderchen01/MMSD2.0', 'mmsd-v2')
 dataset.set_transform(tokenization)