Datasets:
Languages:
English
Size:
10K - 100K
Tags:
sarcasm
sarcasm-detection
multimodal-sarcasm-detection
sarcasm detection
multimodal sarcasm detection
tweets
DOI:
License:
Update README.md
README.md
CHANGED

@@ -114,31 +114,95 @@ This is a copy of the dataset uploaded on Hugging Face for easy access. The orig

Before:

## Usage

```python
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def tokenization(example):
    inputs = processor(
        text=example["text"], images=example["image"], return_tensors="pt"
    )
    return {
        "pixel_values": inputs["pixel_values"],
        "input_ids": inputs["input_ids"],
        "attention_mask": inputs["attention_mask"],
        "label": example["label"],
    }

dataset = load_dataset('coderchen01/MMSD2.0', 'mmsd-v2')
dataset.set_transform(tokenization)

# get torch dataloaders for each split
train_dl = DataLoader(dataset['train'], batch_size=256, shuffle=True)
test_dl = DataLoader(dataset['test'], batch_size=256, shuffle=True)
val_dl = DataLoader(dataset['validation'], batch_size=256, shuffle=True)
```
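
As an optional sanity check, a single transformed example can be inspected before batching; this is a minimal illustrative sketch that only relies on the `dataset` and `tokenization` objects defined above:

```python
# Fetch one transformed example and print the shape of every tensor field.
example = dataset["train"][0]
print({k: tuple(v.shape) for k, v in example.items() if hasattr(v, "shape")})
```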

## References

After:

## Usage

```python
from typing import TypedDict, cast

import pytorch_lightning as pl
from datasets import DatasetDict, load_dataset
from torch import Tensor
from torch.utils.data import DataLoader
from transformers import CLIPProcessor


class MMSDModelInput(TypedDict):
    """A collated batch as produced by the dataloaders below."""

    pixel_values: Tensor
    input_ids: Tensor
    attention_mask: Tensor
    label: Tensor
    id: list[str]


class MMSDDatasetModule(pl.LightningDataModule):
    """LightningDataModule that loads MMSD2.0 and prepares CLIP-ready batches."""

    def __init__(
        self,
        clip_ckpt_name: str = "openai/clip-vit-base-patch32",
        dataset_version: str = "mmsd-v2",
        max_length: int = 77,
        train_batch_size: int = 32,
        val_batch_size: int = 32,
        test_batch_size: int = 32,
        num_workers: int = 19,
    ) -> None:
        super().__init__()
        self.clip_ckpt_name = clip_ckpt_name
        self.dataset_version = dataset_version
        self.train_batch_size = train_batch_size
        self.val_batch_size = val_batch_size
        self.test_batch_size = test_batch_size
        self.num_workers = num_workers
        self.max_length = max_length

    def setup(self, stage: str) -> None:
        processor = CLIPProcessor.from_pretrained(self.clip_ckpt_name)

        def preprocess(example):
            # Tokenize the tweet text and preprocess the image with the CLIP processor.
            inputs = processor(
                text=example["text"],
                images=example["image"],
                return_tensors="pt",
                padding="max_length",
                truncation=True,
                max_length=self.max_length,
            )
            return {
                "pixel_values": inputs["pixel_values"],
                "input_ids": inputs["input_ids"],
                "attention_mask": inputs["attention_mask"],
                "label": example["label"],
            }

        self.raw_dataset = cast(
            DatasetDict,
            load_dataset("coderchen01/MMSD2.0", name=self.dataset_version),
        )
        # Drop the raw text/image columns and return torch tensors so that the
        # default DataLoader collate_fn can stack each field into a batch.
        self.dataset = self.raw_dataset.map(
            preprocess,
            batched=True,
            remove_columns=["text", "image"],
        )
        self.dataset.set_format(
            "torch",
            columns=["pixel_values", "input_ids", "attention_mask", "label"],
            output_all_columns=True,  # keep remaining columns (e.g. the example id) as plain Python objects
        )

    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            self.dataset["train"],
            batch_size=self.train_batch_size,
            shuffle=True,
            num_workers=self.num_workers,
        )

    def val_dataloader(self) -> DataLoader:
        return DataLoader(
            self.dataset["validation"],
            batch_size=self.val_batch_size,
            num_workers=self.num_workers,
        )

    def test_dataloader(self) -> DataLoader:
        return DataLoader(
            self.dataset["test"],
            batch_size=self.test_batch_size,
            num_workers=self.num_workers,
        )
```
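
A minimal sketch of how this data module could be wired into a Lightning training loop; `SarcasmClassifier` is a hypothetical placeholder `LightningModule` and is not provided by this dataset:

```python
# Hypothetical usage: `SarcasmClassifier` stands in for any LightningModule
# whose training/validation/test steps accept MMSDModelInput-style batches.
datamodule = MMSDDatasetModule(train_batch_size=32, num_workers=4)
model = SarcasmClassifier()

trainer = pl.Trainer(max_epochs=5, accelerator="auto")
trainer.fit(model, datamodule=datamodule)   # uses train_dataloader / val_dataloader
trainer.test(model, datamodule=datamodule)  # uses test_dataloader
```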

## References