AnjaJuana committed
Commit 6e82d28 · 1 Parent(s): b63caa8

Auto-update from GitHub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .ipynb_checkpoints/main_model-checkpoint.ipynb +0 -0
  2. main_model.ipynb +0 -0
  3. packing_label_structure.json +6 -6
  4. packing_templates_self_supported_offgrid_expanded.json +3 -3
  5. results/.DS_Store +0 -0
  6. results/MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli_results.pkl +3 -0
  7. results/MoritzLaurer-DeBERTa-v3-large-mnli-fever-anli-ling-wanli_results.pkl +3 -0
  8. results/MoritzLaurer-deberta-v3-large-zeroshot-v2.0_results.pkl +3 -0
  9. results/MoritzLaurer-mDeBERTa-v3-base-mnli-xnli_results.pkl +3 -0
  10. results/before/MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli_results.pkl +3 -0
  11. results/before/MoritzLaurer-DeBERTa-v3-large-mnli-fever-anli-ling-wanli_results.pkl +3 -0
  12. results/before/MoritzLaurer-deberta-v3-large-zeroshot-v2.0_results.pkl +3 -0
  13. results/before/MoritzLaurer-mDeBERTa-v3-base-mnli-xnli_results.pkl +3 -0
  14. results/before/cross-encoder-nli-deberta-v3-base_results.pkl +3 -0
  15. results/before/cross-encoder-nli-deberta-v3-large_results.pkl +3 -0
  16. results/before/facebook-bart-large-mnli_results.pkl +3 -0
  17. results/before/joeddav-bart-large-mnli-yahoo-answers_results.pkl +3 -0
  18. results/before/valhalla-distilbart-mnli-12-1_results.pkl +3 -0
  19. results/cross-encoder-nli-deberta-v3-base_results.pkl +3 -0
  20. results/cross-encoder-nli-deberta-v3-large_results.pkl +3 -0
  21. results/facebook-bart-large-mnli_results.pkl +3 -0
  22. results/joeddav-bart-large-mnli-yahoo-answers_results.pkl +3 -0
  23. results/valhalla-distilbart-mnli-12-1_results.pkl +3 -0
  24. space/.ipynb_checkpoints/gradio_tryout-checkpoint.ipynb +170 -165
  25. space/gradio_tryout.ipynb +153 -87
  26. space/packing_label_structure.json +6 -6
  27. space/packing_templates_self_supported_offgrid_expanded.json +3 -3
  28. space/results/model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli_results.pkl +3 -0
  29. space/space/results/model_b_sileod-deberta-v3-base-tasksource-nli_results.pkl +3 -0
  30. space/space/space/.ipynb_checkpoints/gradio_tryout-checkpoint.ipynb +0 -0
  31. space/space/space/packing_label_hierarchical_mapping.json +2 -2
  32. space/space/space/packing_label_structure.json +2 -2
  33. space/space/space/packing_templates_self_supported_offgrid_expanded.json +2 -2
  34. space/space/space/results/model_a_facebook-bart-large-mnli_results.pkl +3 -0
  35. space/space/space/space/.ipynb_checkpoints/gradio_tryout-checkpoint.ipynb +0 -0
  36. space/space/space/space/gradio_tryout.ipynb +327 -131
  37. space/space/space/space/space/space/space/.DS_Store +0 -0
  38. space/space/space/space/space/space/space/Candidate labels in Word en idee.docx +0 -0
  39. space/space/space/space/space/space/space/gradio_tryout.ipynb +329 -62
  40. space/space/space/space/space/space/space/packing_label_hierarchical_mapping.json +290 -0
  41. space/space/space/space/space/space/space/packing_label_structure.json +96 -0
  42. space/space/space/space/space/space/space/packing_list_api.ipynb +198 -59
  43. space/space/space/space/space/space/space/packing_templates_self_supported_offgrid_expanded.json +696 -0
  44. space/space/space/space/space/space/space/space/labels.txt +93 -89
  45. space/space/space/space/space/space/space/space/packing_list_api.ipynb +110 -67
  46. space/space/space/space/space/space/space/space/space/.ipynb_checkpoints/app-checkpoint.py +18 -18
  47. space/space/space/space/space/space/space/space/space/.ipynb_checkpoints/gradio_tryout-checkpoint.ipynb +182 -2
  48. space/space/space/space/space/space/space/space/space/app.py +18 -18
  49. space/space/space/space/space/space/space/space/space/gradio_tryout.ipynb +50 -6
  50. space/space/space/space/space/space/space/space/space/packing_list_api.ipynb +7 -143
.ipynb_checkpoints/main_model-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
main_model.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
packing_label_structure.json CHANGED
@@ -49,7 +49,8 @@
       "warm destination / summer",
       "variable weather / spring / autumn",
       "tropical / humid",
-      "dry / desert-like"
+      "dry / desert-like",
+      "rainy climate"
     ],
     "style_or_comfort": [
       "ultralight",
@@ -74,14 +75,13 @@
     ],
     "special_conditions": [
       "off-grid / no electricity",
-      "self-supported (bring your own food/cooking)",
-      "child-friendly",
+      "self-supported (bring your own cooking gear)",
+      "travel with children",
       "pet-friendly",
-      "rainy climate",
       "snow and ice",
       "high alpine terrain",
-      "avalanche-prone terrain",
-      "no special conditions"
+      "snow, ice and avalanche-prone terrain",
+      "no special conditions to consider"
     ],
     "trip_length_days": [
       "1 day",
packing_templates_self_supported_offgrid_expanded.json CHANGED
@@ -535,7 +535,7 @@
       "USB-hub (voor meerdere devices)",
       "verpakking om elektronica droog te houden"
     ],
-    "self-supported (bring your own food/cooking)": [
+    "self-supported (bring your own cooking gear)": [
       "lichtgewicht kooktoestel (gas, benzine of alcohol)",
       "brandstof (voldoende voor aantal dagen)",
       "pan of keteltje",
@@ -552,7 +552,7 @@
       "minstens 2 liter wateropslag per persoon",
       "food bag of hangzak voor voedsel (wild-safe)"
     ],
-    "child-friendly": [
+    "travel with children": [
       "snacks en speelgoed",
       "EHBO-set met pleisters",
       "extra kleding",
@@ -606,7 +606,7 @@
       "extra voeding",
       "EHBO-kit"
     ],
-    "avalanche-prone terrain": [
+    "snow, ice and avalanche-prone terrain": [
       "lawinepieper",
       "schep",
       "sonde",
results/.DS_Store ADDED
Binary file (6.15 kB).
 
results/MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ff7524d88a78292aac59dd808d6409da3094dca6c480e702c723de033a64fee
+ size 9616
results/MoritzLaurer-DeBERTa-v3-large-mnli-fever-anli-ling-wanli_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20bab421410028527f62eb5dafe383c6062dcdef4f335f99079283a5c77700e5
+ size 9526
results/MoritzLaurer-deberta-v3-large-zeroshot-v2.0_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37383ba39c8fbf9fa4e0f5ab2f263a94044d30fa55c7002f2dde059371e2744a
+ size 9424
results/MoritzLaurer-mDeBERTa-v3-base-mnli-xnli_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5eaa0e36c292a34907b242d890251cb378e113852a472711e469d6dd246a50a7
+ size 9544
results/before/MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af6a30fdc19d830ebe35e091878231b84618663781d1ed86b1f53507065d20e5
+ size 9533
results/before/MoritzLaurer-DeBERTa-v3-large-mnli-fever-anli-ling-wanli_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e11eadfa64ac378e24c1df9c8f952b19fcca05ca634068506713d25ef1461e2
+ size 9509
results/before/MoritzLaurer-deberta-v3-large-zeroshot-v2.0_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c1b48262c7ff16332488e83aaa29703469463fae39fb950aeb055cfb6bdbc70
+ size 9356
results/before/MoritzLaurer-mDeBERTa-v3-base-mnli-xnli_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55172023628edf78e2663cdfb75f18852fbb1979d74332140176bcad4017a98d
+ size 9562
results/before/cross-encoder-nli-deberta-v3-base_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5116d85a6fd3d1669b8c307934df35aa87bdbb8e89573a05851dcdf6078a4db
+ size 9643
results/before/cross-encoder-nli-deberta-v3-large_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b354f0cb55f0b398614195155e66a537d5b12f9dd657a370bfbf7f6c2c9ca5bd
+ size 9463
results/before/facebook-bart-large-mnli_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bee4588344312e7c93436e88f58257935a273f90ec7422a9a0fab77b57540a2e
+ size 9427
results/before/joeddav-bart-large-mnli-yahoo-answers_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c17c9d623753ad38339bdf9e4204ff34b2ddf6ffc11b8077617da6349aff816
+ size 9718
results/before/valhalla-distilbart-mnli-12-1_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45d4d12f3880836626c96e5dddad7293aa8124abec5eb187845a0637cdd93df1
+ size 9277
results/cross-encoder-nli-deberta-v3-base_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce310367d9c5baec72cef673cf1608ee1b11fc7239c459b38f4ffb86cbe7ddff
+ size 9670
results/cross-encoder-nli-deberta-v3-large_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b363db77b85d99ed32e0daa026f31cfdd7d001ab5f4782abecd2f11f2a06281
+ size 9602
results/facebook-bart-large-mnli_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e00867ad478907a6e514e97d342ef8915ee2c486912ff7056e6dfc5faa11278
+ size 9489
results/joeddav-bart-large-mnli-yahoo-answers_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:175935f34f69404b1662d4572a543ddd77938bc5ee53751034e2ddb07f6b4d49
+ size 9694
results/valhalla-distilbart-mnli-12-1_results.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:779ac1118c668f3c090dafe28608c624b0acac270bff55ddb7babff36ee33f09
+ size 9374
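The .pkl entries above are Git LFS pointer files (version/oid/size), so the actual pickles have to be fetched first, for example with `git lfs pull`, before they can be read. A hedged sketch of loading one of them follows; the dictionary keys mirror the model_result structure saved in gradio_tryout.ipynb ('model', 'predictions', 'performance', 'perf_summary'), which is an assumption about these particular files.

    import pickle

    # After `git lfs pull`, the pointer file is replaced by the real pickle on disk.
    with open("results/facebook-bart-large-mnli_results.pkl", "rb") as f:
        model_result = pickle.load(f)

    print(model_result["model"])         # model identifier string (assumed key)
    print(model_result["perf_summary"])  # mean accuracy / true_ident / false_pred (assumed key)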
space/.ipynb_checkpoints/gradio_tryout-checkpoint.ipynb CHANGED
@@ -19,113 +19,15 @@
19
  "id": "3bbee2e4-55c8-4b06-9929-72026edf7932",
20
  "metadata": {},
21
  "source": [
22
- "**Load and try the model**"
23
  ]
24
  },
25
  {
26
  "cell_type": "code",
27
- "execution_count": 36,
28
  "id": "f8c28d2d-8458-49fd-8ebf-5e729d6e861f",
29
  "metadata": {},
30
  "outputs": [
31
- {
32
- "data": {
33
- "application/vnd.jupyter.widget-view+json": {
34
- "model_id": "9ec45d8fb4e247e4b1188972547ebb7f",
35
- "version_major": 2,
36
- "version_minor": 0
37
- },
38
- "text/plain": [
39
- "config.json: 0%| | 0.00/1.09k [00:00<?, ?B/s]"
40
- ]
41
- },
42
- "metadata": {},
43
- "output_type": "display_data"
44
- },
45
- {
46
- "data": {
47
- "application/vnd.jupyter.widget-view+json": {
48
- "model_id": "991968ef7a0448b39ca84316e6d06902",
49
- "version_major": 2,
50
- "version_minor": 0
51
- },
52
- "text/plain": [
53
- "model.safetensors: 0%| | 0.00/369M [00:00<?, ?B/s]"
54
- ]
55
- },
56
- "metadata": {},
57
- "output_type": "display_data"
58
- },
59
- {
60
- "data": {
61
- "application/vnd.jupyter.widget-view+json": {
62
- "model_id": "561c0cdcc88f4cd6b89dd19bcc0599cb",
63
- "version_major": 2,
64
- "version_minor": 0
65
- },
66
- "text/plain": [
67
- "tokenizer_config.json: 0%| | 0.00/1.28k [00:00<?, ?B/s]"
68
- ]
69
- },
70
- "metadata": {},
71
- "output_type": "display_data"
72
- },
73
- {
74
- "data": {
75
- "application/vnd.jupyter.widget-view+json": {
76
- "model_id": "c5e517b2512246018e5cb1fdc8d3e1e1",
77
- "version_major": 2,
78
- "version_minor": 0
79
- },
80
- "text/plain": [
81
- "spm.model: 0%| | 0.00/2.46M [00:00<?, ?B/s]"
82
- ]
83
- },
84
- "metadata": {},
85
- "output_type": "display_data"
86
- },
87
- {
88
- "data": {
89
- "application/vnd.jupyter.widget-view+json": {
90
- "model_id": "03b470dec7174fd29b9b0e4afab759a0",
91
- "version_major": 2,
92
- "version_minor": 0
93
- },
94
- "text/plain": [
95
- "tokenizer.json: 0%| | 0.00/8.66M [00:00<?, ?B/s]"
96
- ]
97
- },
98
- "metadata": {},
99
- "output_type": "display_data"
100
- },
101
- {
102
- "data": {
103
- "application/vnd.jupyter.widget-view+json": {
104
- "model_id": "16d24496b31b4c0fb7744c8ee1222d40",
105
- "version_major": 2,
106
- "version_minor": 0
107
- },
108
- "text/plain": [
109
- "added_tokens.json: 0%| | 0.00/23.0 [00:00<?, ?B/s]"
110
- ]
111
- },
112
- "metadata": {},
113
- "output_type": "display_data"
114
- },
115
- {
116
- "data": {
117
- "application/vnd.jupyter.widget-view+json": {
118
- "model_id": "ef2aefc365734c1884bbbb04d79edf5b",
119
- "version_major": 2,
120
- "version_minor": 0
121
- },
122
- "text/plain": [
123
- "special_tokens_map.json: 0%| | 0.00/286 [00:00<?, ?B/s]"
124
- ]
125
- },
126
- "metadata": {},
127
- "output_type": "display_data"
128
- },
129
  {
130
  "name": "stderr",
131
  "output_type": "stream",
@@ -152,10 +54,14 @@
152
  "import matplotlib.pyplot as plt\n",
153
  "import pickle\n",
154
  "import os\n",
 
155
  "\n",
156
  "# Load the model and create a pipeline for zero-shot classification (1min loading + classifying with 89 labels)\n",
157
- "classifier = pipeline(\"zero-shot-classification\", model=\"MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli\")\n",
 
158
  "# tried:\n",
 
 
159
  "# facebook/bart-large-mnli\n",
160
  "# sileod/deberta-v3-base-tasksource-nli\n",
161
  "\n",
@@ -182,7 +88,7 @@
182
  },
183
  {
184
  "cell_type": "code",
185
- "execution_count": 37,
186
  "id": "3a762755-872d-43a6-b666-874d6133488c",
187
  "metadata": {},
188
  "outputs": [],
@@ -212,7 +118,7 @@
212
  },
213
  {
214
  "cell_type": "code",
215
- "execution_count": 38,
216
  "id": "3b4f3193-3bdd-453c-8664-df84f955600c",
217
  "metadata": {},
218
  "outputs": [],
@@ -249,87 +155,85 @@
249
  },
250
  {
251
  "cell_type": "code",
252
- "execution_count": 39,
253
  "id": "4dd01755-be8d-4904-8494-ac28aba2fee7",
254
  "metadata": {
255
  "scrolled": true
256
  },
257
  "outputs": [
258
- {
259
- "name": "stderr",
260
- "output_type": "stream",
261
- "text": [
262
- "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
263
- ]
264
- },
265
  {
266
  "name": "stdout",
267
  "output_type": "stream",
268
  "text": [
269
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['beach vacation', 'micro-adventure / weekend trip', 'cultural exploration', 'nature escape', 'digital nomad trip', 'camping trip (campground)', 'camping trip (wild camping)', 'long-distance hike / thru-hike', 'ski tour / skitour', 'hut trek (summer)', 'city trip', 'hut trek (winter)', 'road trip (car/camper)', 'festival trip', 'yoga / wellness retreat', 'snowboard / splitboard trip'], 'scores': [0.37198853492736816, 0.31496119499206543, 0.10890532284975052, 0.09102731198072433, 0.0735681876540184, 0.012933704070746899, 0.009422042407095432, 0.0051276967860758305, 0.004056071396917105, 0.0017408831045031548, 0.001503779087215662, 0.0014244643971323967, 0.0013752576196566224, 0.0009292717440985143, 0.0006881792796775699, 0.0003480584127828479]}\n",
270
- "beach vacation\n",
271
  "0\n",
272
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['relaxing', 'hiking', 'going to the beach', 'photography', 'sightseeing', 'hut-to-hut hiking', 'snorkeling', 'snowshoe hiking', 'yoga', 'stand-up paddleboarding (SUP)', 'kayaking / canoeing', 'horseback riding', 'swimming', 'paragliding', 'rafting', 'biking', 'rock climbing', 'surfing', 'running', 'ice climbing', 'cross-country skiing', 'fishing', 'ski touring', 'skiing', 'scuba diving'], 'scores': [0.9943736791610718, 0.9631249308586121, 0.9454535841941833, 0.7538902759552002, 0.4525446593761444, 0.1696157604455948, 0.05957728251814842, 0.04234873503446579, 0.01991761103272438, 0.016971556469798088, 0.006959819234907627, 0.00411367928609252, 0.0030609173700213432, 0.00186573073733598, 0.0017515394138172269, 0.00142807571683079, 0.0005748369731009007, 0.00037779140984639525, 0.0003097739245276898, 0.00030914091621525586, 0.0002725012309383601, 0.00027050732751376927, 0.00024376016517635435, 0.00017392759036738425, 0.00014787293912377208]}\n",
273
- "['relaxing', 'hiking', 'going to the beach', 'photography']\n",
274
  "1\n",
275
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['tropical / humid', 'warm destination / summer', 'variable weather / spring / autumn', 'cold destination / winter', 'dry / desert-like'], 'scores': [0.4895477890968323, 0.25917261838912964, 0.24829530715942383, 0.0017174285603687167, 0.0012668712297454476]}\n",
276
- "tropical / humid\n",
277
  "2\n",
278
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['minimalist', 'luxury (including evening wear)', 'lightweight (but comfortable)', 'ultralight'], 'scores': [0.7574900984764099, 0.09964746236801147, 0.07804173231124878, 0.06482075154781342]}\n",
279
  "minimalist\n",
280
  "3\n",
281
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['casual', 'conservative', 'formal (business trip)'], 'scores': [0.8163393139839172, 0.11898067593574524, 0.06467998772859573]}\n",
282
  "casual\n",
283
  "4\n",
284
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['indoor', 'huts with half board', 'sleeping in a car', 'sleeping in a tent'], 'scores': [0.6389047503471375, 0.18624886870384216, 0.13902997970581055, 0.03581654652953148]}\n",
285
  "indoor\n",
286
  "5\n",
287
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['no own vehicle', 'own vehicle'], 'scores': [0.9990958571434021, 0.0009041387238539755]}\n",
288
  "no own vehicle\n",
289
- "6\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
290
  ]
291
  },
292
  {
293
- "ename": "KeyboardInterrupt",
294
- "evalue": "",
295
  "output_type": "error",
296
  "traceback": [
297
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
298
- "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
299
- "Cell \u001b[0;32mIn[39], line 7\u001b[0m\n\u001b[1;32m 5\u001b[0m current_trip \u001b[38;5;241m=\u001b[39m trip_descriptions[i]\n\u001b[1;32m 6\u001b[0m current_type \u001b[38;5;241m=\u001b[39m trip_types[i]\n\u001b[0;32m----> 7\u001b[0m df \u001b[38;5;241m=\u001b[39m \u001b[43mpred_trip\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcurrent_trip\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcurrent_type\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcut_off\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0.5\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28mprint\u001b[39m(df)\n\u001b[1;32m 10\u001b[0m \u001b[38;5;66;03m# accuracy, perc true classes identified and perc wrong pred classes\u001b[39;00m\n",
300
- "Cell \u001b[0;32mIn[37], line 14\u001b[0m, in \u001b[0;36mpred_trip\u001b[0;34m(trip_descr, trip_type, cut_off)\u001b[0m\n\u001b[1;32m 12\u001b[0m classes \u001b[38;5;241m=\u001b[39m [result[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mlabels\u001b[39m\u001b[38;5;124m'\u001b[39m][i] \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m indices]\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m---> 14\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mclassifier\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrip_descr\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcandidate_labels\u001b[49m\u001b[43m[\u001b[49m\u001b[43mkey\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 15\u001b[0m classes \u001b[38;5;241m=\u001b[39m result[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlabels\u001b[39m\u001b[38;5;124m\"\u001b[39m][\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28mprint\u001b[39m(result)\n",
301
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/zero_shot_classification.py:206\u001b[0m, in \u001b[0;36mZeroShotClassificationPipeline.__call__\u001b[0;34m(self, sequences, *args, **kwargs)\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 204\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUnable to understand extra arguments \u001b[39m\u001b[38;5;132;01m{\u001b[39;00margs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 206\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__call__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43msequences\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
302
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/base.py:1294\u001b[0m, in \u001b[0;36mPipeline.__call__\u001b[0;34m(self, inputs, num_workers, batch_size, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1292\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39miterate(inputs, preprocess_params, forward_params, postprocess_params)\n\u001b[1;32m 1293\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mframework \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m, ChunkPipeline):\n\u001b[0;32m-> 1294\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1295\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43miter\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1296\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_iterator\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1297\u001b[0m \u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_workers\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpreprocess_params\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mforward_params\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpostprocess_params\u001b[49m\n\u001b[1;32m 1298\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1299\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1300\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1301\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1302\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrun_single(inputs, preprocess_params, forward_params, postprocess_params)\n",
303
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/pt_utils.py:124\u001b[0m, in \u001b[0;36mPipelineIterator.__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 121\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloader_batch_item()\n\u001b[1;32m 123\u001b[0m \u001b[38;5;66;03m# We're out of items within a batch\u001b[39;00m\n\u001b[0;32m--> 124\u001b[0m item \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43miterator\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 125\u001b[0m processed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfer(item, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mparams)\n\u001b[1;32m 126\u001b[0m \u001b[38;5;66;03m# We now have a batch of \"inferred things\".\u001b[39;00m\n",
304
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/pt_utils.py:269\u001b[0m, in \u001b[0;36mPipelinePackIterator.__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 266\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m accumulator\n\u001b[1;32m 268\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_last:\n\u001b[0;32m--> 269\u001b[0m processed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minfer\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43miterator\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparams\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 270\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloader_batch_size \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 271\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(processed, torch\u001b[38;5;241m.\u001b[39mTensor):\n",
305
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/base.py:1209\u001b[0m, in \u001b[0;36mPipeline.forward\u001b[0;34m(self, model_inputs, **forward_params)\u001b[0m\n\u001b[1;32m 1207\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m inference_context():\n\u001b[1;32m 1208\u001b[0m model_inputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_ensure_tensor_on_device(model_inputs, device\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdevice)\n\u001b[0;32m-> 1209\u001b[0m model_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mforward_params\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1210\u001b[0m model_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_ensure_tensor_on_device(model_outputs, device\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mdevice(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcpu\u001b[39m\u001b[38;5;124m\"\u001b[39m))\n\u001b[1;32m 1211\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n",
306
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/zero_shot_classification.py:229\u001b[0m, in \u001b[0;36mZeroShotClassificationPipeline._forward\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 227\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muse_cache\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m inspect\u001b[38;5;241m.\u001b[39msignature(model_forward)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mkeys():\n\u001b[1;32m 228\u001b[0m model_inputs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muse_cache\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[0;32m--> 229\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 231\u001b[0m model_outputs \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 232\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcandidate_label\u001b[39m\u001b[38;5;124m\"\u001b[39m: candidate_label,\n\u001b[1;32m 233\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msequence\u001b[39m\u001b[38;5;124m\"\u001b[39m: sequence,\n\u001b[1;32m 234\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mis_last\u001b[39m\u001b[38;5;124m\"\u001b[39m: inputs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mis_last\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[1;32m 235\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39moutputs,\n\u001b[1;32m 236\u001b[0m }\n\u001b[1;32m 237\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m model_outputs\n",
307
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
308
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
309
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:1297\u001b[0m, in \u001b[0;36mDebertaV2ForSequenceClassification.forward\u001b[0;34m(self, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, labels, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 1289\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1290\u001b[0m \u001b[38;5;124;03mlabels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\u001b[39;00m\n\u001b[1;32m 1291\u001b[0m \u001b[38;5;124;03m Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\u001b[39;00m\n\u001b[1;32m 1292\u001b[0m \u001b[38;5;124;03m config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\u001b[39;00m\n\u001b[1;32m 1293\u001b[0m \u001b[38;5;124;03m `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\u001b[39;00m\n\u001b[1;32m 1294\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1295\u001b[0m return_dict \u001b[38;5;241m=\u001b[39m return_dict \u001b[38;5;28;01mif\u001b[39;00m return_dict \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39muse_return_dict\n\u001b[0;32m-> 1297\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdeberta\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1298\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1299\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken_type_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken_type_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1300\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1301\u001b[0m \u001b[43m \u001b[49m\u001b[43mposition_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mposition_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1302\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs_embeds\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs_embeds\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1303\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1304\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1305\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1306\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1308\u001b[0m encoder_layer \u001b[38;5;241m=\u001b[39m outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 1309\u001b[0m pooled_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpooler(encoder_layer)\n",
310
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
311
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
312
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:1063\u001b[0m, in \u001b[0;36mDebertaV2Model.forward\u001b[0;34m(self, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 1053\u001b[0m token_type_ids \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mzeros(input_shape, dtype\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mlong, device\u001b[38;5;241m=\u001b[39mdevice)\n\u001b[1;32m 1055\u001b[0m embedding_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39membeddings(\n\u001b[1;32m 1056\u001b[0m input_ids\u001b[38;5;241m=\u001b[39minput_ids,\n\u001b[1;32m 1057\u001b[0m token_type_ids\u001b[38;5;241m=\u001b[39mtoken_type_ids,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1060\u001b[0m inputs_embeds\u001b[38;5;241m=\u001b[39minputs_embeds,\n\u001b[1;32m 1061\u001b[0m )\n\u001b[0;32m-> 1063\u001b[0m encoder_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mencoder\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1064\u001b[0m \u001b[43m \u001b[49m\u001b[43membedding_output\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1065\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1066\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 1067\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1068\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1069\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1070\u001b[0m encoded_layers \u001b[38;5;241m=\u001b[39m encoder_outputs[\u001b[38;5;241m1\u001b[39m]\n\u001b[1;32m 1072\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mz_steps \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m1\u001b[39m:\n",
313
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
314
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
315
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:507\u001b[0m, in \u001b[0;36mDebertaV2Encoder.forward\u001b[0;34m(self, hidden_states, attention_mask, output_hidden_states, output_attentions, query_states, relative_pos, return_dict)\u001b[0m\n\u001b[1;32m 497\u001b[0m output_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_gradient_checkpointing_func(\n\u001b[1;32m 498\u001b[0m layer_module\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__call__\u001b[39m,\n\u001b[1;32m 499\u001b[0m next_kv,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 504\u001b[0m output_attentions,\n\u001b[1;32m 505\u001b[0m )\n\u001b[1;32m 506\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 507\u001b[0m output_states \u001b[38;5;241m=\u001b[39m \u001b[43mlayer_module\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 508\u001b[0m \u001b[43m \u001b[49m\u001b[43mnext_kv\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 509\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 510\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 511\u001b[0m \u001b[43m \u001b[49m\u001b[43mrelative_pos\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrelative_pos\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 512\u001b[0m \u001b[43m \u001b[49m\u001b[43mrel_embeddings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrel_embeddings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 513\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 514\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 516\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m output_attentions:\n\u001b[1;32m 517\u001b[0m output_states, att_m \u001b[38;5;241m=\u001b[39m output_states\n",
316
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
317
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
318
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:366\u001b[0m, in \u001b[0;36mDebertaV2Layer.forward\u001b[0;34m(self, hidden_states, attention_mask, query_states, relative_pos, rel_embeddings, output_attentions)\u001b[0m\n\u001b[1;32m 364\u001b[0m attention_output, att_matrix \u001b[38;5;241m=\u001b[39m attention_output\n\u001b[1;32m 365\u001b[0m intermediate_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mintermediate(attention_output)\n\u001b[0;32m--> 366\u001b[0m layer_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43moutput\u001b[49m\u001b[43m(\u001b[49m\u001b[43mintermediate_output\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mattention_output\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 367\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m output_attentions:\n\u001b[1;32m 368\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m (layer_output, att_matrix)\n",
319
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
320
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
321
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:332\u001b[0m, in \u001b[0;36mDebertaV2Output.forward\u001b[0;34m(self, hidden_states, input_tensor)\u001b[0m\n\u001b[1;32m 331\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, hidden_states, input_tensor):\n\u001b[0;32m--> 332\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdense\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 333\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdropout(hidden_states)\n\u001b[1;32m 334\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mLayerNorm(hidden_states \u001b[38;5;241m+\u001b[39m input_tensor)\n",
322
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
323
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
324
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/linear.py:116\u001b[0m, in \u001b[0;36mLinear.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlinear\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
325
- "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
326
  ]
327
  }
328
  ],
329
  "source": [
330
  "result_list = []\n",
331
  "performance = pd.DataFrame(columns=['accuracy', 'true_ident', 'false_pred'])\n",
332
- " \n",
 
 
333
  "for i in range(len(trip_descriptions)):\n",
334
  " current_trip = trip_descriptions[i]\n",
335
  " current_type = trip_types[i]\n",
@@ -340,7 +244,11 @@
340
  " performance = pd.concat([performance, perf_measure(df)])\n",
341
  " print(performance)\n",
342
  " \n",
343
- " result_list.append(df)"
344
  ]
345
  },
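The cell above loops over the test trips, classifies each one with `pred_trip`, and accumulates per-trip metrics via `perf_measure`. A minimal sketch of that loop, assuming the notebook's own `pred_trip` and `perf_measure` helpers and the `trip_descriptions` / `trip_types` lists are already defined; rows are collected first and concatenated once at the end instead of calling `pd.concat` inside the loop:

```python
# Sketch of the evaluation loop; pred_trip, perf_measure,
# trip_descriptions and trip_types are the notebook's own objects.
import pandas as pd

result_list = []
performance_rows = []

for current_trip, current_type in zip(trip_descriptions, trip_types):
    df = pred_trip(current_trip, current_type, cut_off=0.5)
    performance_rows.append(perf_measure(df))   # one-row DataFrame per trip
    result_list.append(df)

# concatenating once avoids repeatedly rebuilding the DataFrame in the loop
performance = pd.concat(performance_rows, ignore_index=True)
```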
346
  {
@@ -353,10 +261,22 @@
353
  },
354
  {
355
  "cell_type": "code",
356
- "execution_count": null,
357
  "id": "eb33fd31-94e6-40b5-9c36-a32effe77c01",
358
  "metadata": {},
359
- "outputs": [],
360
  "source": [
361
  "# Extract \"same_value\" column from each DataFrame\n",
362
  "sv_columns = [df['same_value'] for df in result_list] # 'same' needs to be changed\n",
@@ -370,10 +290,27 @@
370
  },
371
  {
372
  "cell_type": "code",
373
- "execution_count": null,
374
  "id": "bf7546cb-79ce-49ad-8cee-54d02239220c",
375
  "metadata": {},
376
- "outputs": [],
377
  "source": [
378
  "# Compute accuracy per superclass (row means of same_value matrix excluding the first column)\n",
379
  "row_means = sv_df.iloc[:, 1:].mean(axis=1)\n",
@@ -413,15 +350,14 @@
413
  "outputs": [],
414
  "source": [
415
  "# save results\n",
416
- "# Example data for one model\n",
417
- "model_name = 'model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli'\n",
418
  "# Structure to save\n",
419
  "model_result = {\n",
420
  " 'model': model_name,\n",
421
  " 'predictions': result_list,\n",
422
  " 'performance': performance,\n",
423
  " 'perf_summary': column_means,\n",
424
- " 'perf_superclass': df_row_means\n",
 
425
  "}\n",
426
  "\n",
427
  "# File path with folder\n",
@@ -432,6 +368,16 @@
432
  " pickle.dump(model_result, f)"
433
  ]
434
  },
435
  {
436
  "cell_type": "markdown",
437
  "id": "e1cbb54e-abe6-49b6-957e-0683196f3199",
@@ -442,14 +388,23 @@
442
  },
443
  {
444
  "cell_type": "code",
445
- "execution_count": 35,
446
  "id": "62ca82b0-6909-4e6c-9d2c-fed87971e5b6",
447
- "metadata": {},
 
 
448
  "outputs": [
449
  {
450
  "name": "stdout",
451
  "output_type": "stream",
452
  "text": [
453
  "Model: model_a_facebook-bart-large-mnli\n",
454
  "Performance Summary:\n",
455
  "accuracy 0.454545\n",
@@ -464,6 +419,19 @@
464
  "false_pred 0.551667\n",
465
  "dtype: float64\n",
466
  "----------------------------------------\n",
467
  "Model: model_a_facebook-bart-large-mnli\n",
468
  "Performance Summary:\n",
469
  " superclass accuracy\n",
@@ -511,20 +479,57 @@
511
  " result = pickle.load(f)\n",
512
  " all_results[model_name] = result\n",
513
  "\n",
514
- "# Now you can compare performance across models\n",
515
  "for model, data in all_results.items():\n",
516
  " print(f\"Model: {model}\")\n",
517
  " print(f\"Performance Summary:\\n{data['perf_summary']}\")\n",
518
  " print(\"-\" * 40)\n",
519
  "\n",
520
  "\n",
521
- "# Now you can compare performance across models\n",
522
  "for model, data in all_results.items():\n",
523
  " print(f\"Model: {model}\")\n",
524
  " print(f\"Performance Summary:\\n{data['perf_superclass']}\")\n",
525
  " print(\"-\" * 40)"
526
  ]
527
  },
528
  {
529
  "cell_type": "markdown",
530
  "id": "17483df4-55c4-41cd-b8a9-61f7a5c7e8a3",
 
19
  "id": "3bbee2e4-55c8-4b06-9929-72026edf7932",
20
  "metadata": {},
21
  "source": [
22
+ "**Load prerequisites**"
23
  ]
24
  },
25
  {
26
  "cell_type": "code",
27
+ "execution_count": 57,
28
  "id": "f8c28d2d-8458-49fd-8ebf-5e729d6e861f",
29
  "metadata": {},
30
  "outputs": [
31
  {
32
  "name": "stderr",
33
  "output_type": "stream",
 
54
  "import matplotlib.pyplot as plt\n",
55
  "import pickle\n",
56
  "import os\n",
57
+ "import time\n",
58
  "\n",
59
  "# Load the model and create a pipeline for zero-shot classification (1min loading + classifying with 89 labels)\n",
60
+ "classifier = pipeline(\"zero-shot-classification\", model=\"cross-encoder/nli-deberta-v3-base\")\n",
61
+ "model_name = 'model_cross-encoder-nli-deberta-v3-base'\n",
62
  "# tried:\n",
63
+ "# cross-encoder/nli-deberta-v3-large gave error\n",
64
+ "# MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli\n",
65
  "# facebook/bart-large-mnli\n",
66
  "# sileod/deberta-v3-base-tasksource-nli\n",
67
  "\n",
 
88
  },
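For reference, the classification pattern this cell sets up, shown as a small self-contained sketch. The model name is the one loaded above; the example text and the four activity labels are placeholders, and 0.5 is the cut-off used later in the notebook:

```python
# Minimal zero-shot call in the style used throughout this notebook.
from transformers import pipeline

classifier = pipeline("zero-shot-classification",
                      model="cross-encoder/nli-deberta-v3-base")

trip_descr = "A week of island hopping in Greece: beach, food, maybe a hike."  # placeholder text
activity_labels = ["going to the beach", "hiking", "relaxing", "sightseeing"]   # placeholder label subset

result = classifier(trip_descr, activity_labels, multi_label=True)
chosen = [lab for lab, score in zip(result["labels"], result["scores"]) if score > 0.5]
print(chosen)
```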
89
  {
90
  "cell_type": "code",
91
+ "execution_count": 58,
92
  "id": "3a762755-872d-43a6-b666-874d6133488c",
93
  "metadata": {},
94
  "outputs": [],
 
118
  },
119
  {
120
  "cell_type": "code",
121
+ "execution_count": 59,
122
  "id": "3b4f3193-3bdd-453c-8664-df84f955600c",
123
  "metadata": {},
124
  "outputs": [],
 
155
  },
156
  {
157
  "cell_type": "code",
158
+ "execution_count": 60,
159
  "id": "4dd01755-be8d-4904-8494-ac28aba2fee7",
160
  "metadata": {
161
  "scrolled": true
162
  },
163
  "outputs": [
164
  {
165
  "name": "stdout",
166
  "output_type": "stream",
167
  "text": [
168
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['micro-adventure / weekend trip', 'digital nomad trip', 'beach vacation', 'festival trip', 'city trip', 'cultural exploration', 'road trip (car/camper)', 'camping trip (wild camping)', 'long-distance hike / thru-hike', 'hut trek (winter)', 'ski tour / skitour', 'snowboard / splitboard trip', 'nature escape', 'yoga / wellness retreat', 'hut trek (summer)', 'camping trip (campground)'], 'scores': [0.9722680449485779, 0.007802918087691069, 0.0075571718625724316, 0.0022959215566515923, 0.0021305829286575317, 0.001222927705384791, 0.0009879637509584427, 0.000805296644102782, 0.0007946204277686775, 0.0007107199053280056, 0.0007009899127297103, 0.0006353880744427443, 0.0005838185315951705, 0.0005424902774393559, 0.0004807499353773892, 0.0004804217896889895]}\n",
169
+ "micro-adventure / weekend trip\n",
170
  "0\n",
171
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['going to the beach', 'sightseeing', 'relaxing', 'hiking', 'hut-to-hut hiking', 'stand-up paddleboarding (SUP)', 'photography', 'biking', 'running', 'ski touring', 'snowshoe hiking', 'yoga', 'kayaking / canoeing', 'horseback riding', 'rafting', 'paragliding', 'cross-country skiing', 'surfing', 'skiing', 'ice climbing', 'fishing', 'snorkeling', 'swimming', 'rock climbing', 'scuba diving'], 'scores': [0.4660525321960449, 0.007281942293047905, 0.003730606520548463, 0.0001860307966126129, 0.00014064949937164783, 0.00011034693307010457, 5.2949126256862655e-05, 3.828677654382773e-05, 3.396756437723525e-05, 1.5346524378401227e-05, 9.348185812996235e-06, 8.182429155567661e-06, 6.5973340497293975e-06, 6.271920938161202e-06, 5.544673058466287e-06, 5.299102667777333e-06, 4.855380211665761e-06, 4.506250661506783e-06, 3.949530764657538e-06, 3.730233856913401e-06, 3.297281637060223e-06, 3.0508665531669976e-06, 2.933618134193239e-06, 2.6379277642263332e-06, 2.2992651338427095e-06]}\n",
172
+ "[]\n",
173
  "1\n",
174
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['variable weather / spring / autumn', 'warm destination / summer', 'cold destination / winter', 'dry / desert-like', 'tropical / humid'], 'scores': [0.5934922695159912, 0.17430798709392548, 0.10943299531936646, 0.07068652659654617, 0.05208020657300949]}\n",
175
+ "variable weather / spring / autumn\n",
176
  "2\n",
177
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['minimalist', 'ultralight', 'luxury (including evening wear)', 'lightweight (but comfortable)'], 'scores': [0.6965053081512451, 0.11270010471343994, 0.10676420480012894, 0.08403033763170242]}\n",
178
  "minimalist\n",
179
  "3\n",
180
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['casual', 'formal (business trip)', 'conservative'], 'scores': [0.6362482309341431, 0.22082458436489105, 0.14292724430561066]}\n",
181
  "casual\n",
182
  "4\n",
183
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['indoor', 'sleeping in a tent', 'huts with half board', 'sleeping in a car'], 'scores': [0.435793399810791, 0.20242486894130707, 0.19281964004039764, 0.16896207630634308]}\n",
184
  "indoor\n",
185
  "5\n",
186
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['no own vehicle', 'own vehicle'], 'scores': [0.9987181425094604, 0.0012818538816645741]}\n",
187
  "no own vehicle\n",
188
+ "6\n",
189
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['self-supported (bring your own food/cooking)', 'no special conditions', 'off-grid / no electricity', 'rainy climate', 'child-friendly', 'snow and ice', 'pet-friendly', 'high alpine terrain', 'avalanche-prone terrain'], 'scores': [0.1984991431236267, 0.1695038080215454, 0.16221018135547638, 0.13200421631336212, 0.12101645022630692, 0.10550825297832489, 0.042406272143125534, 0.03797775134444237, 0.030873913317918777]}\n",
190
+ "self-supported (bring your own food/cooking)\n",
191
+ "7\n",
192
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['7+ days', '2 days', '1 day', '7 days', '5 days', '3 days', '6 days', '4 days'], 'scores': [0.4730822443962097, 0.1168912723660469, 0.10058756172657013, 0.0991850346326828, 0.05424537882208824, 0.053677864372730255, 0.051554784178733826, 0.050775907933712006]}\n",
193
+ "7+ days\n",
194
+ "8\n",
195
+ " superclass pred_class \\\n",
196
+ "0 activity_type micro-adventure / weekend trip \n",
197
+ "1 activities [] \n",
198
+ "2 climate_or_season variable weather / spring / autumn \n",
199
+ "3 style_or_comfort minimalist \n",
200
+ "4 dress_code casual \n",
201
+ "5 accommodation indoor \n",
202
+ "6 transportation no own vehicle \n",
203
+ "7 special_conditions self-supported (bring your own food/cooking) \n",
204
+ "8 trip_length_days 7+ days \n",
205
+ "\n",
206
+ " true_class \n",
207
+ "0 beach vacation \n",
208
+ "1 [swimming, going to the beach, relaxing, hiking] \n",
209
+ "2 warm destination / summer \n",
210
+ "3 lightweight (but comfortable) \n",
211
+ "4 casual \n",
212
+ "5 indoor \n",
213
+ "6 no own vehicle \n",
214
+ "7 no special conditions \n",
215
+ "8 7+ days \n"
216
  ]
217
  },
218
  {
219
+ "ename": "ZeroDivisionError",
220
+ "evalue": "division by zero",
221
  "output_type": "error",
222
  "traceback": [
223
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
224
+ "\u001b[0;31mZeroDivisionError\u001b[0m Traceback (most recent call last)",
225
+ "Cell \u001b[0;32mIn[60], line 13\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28mprint\u001b[39m(df)\n\u001b[1;32m 12\u001b[0m \u001b[38;5;66;03m# accuracy, perc true classes identified and perc wrong pred classes\u001b[39;00m\n\u001b[0;32m---> 13\u001b[0m performance \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mconcat([performance, \u001b[43mperf_measure\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdf\u001b[49m\u001b[43m)\u001b[49m])\n\u001b[1;32m 14\u001b[0m \u001b[38;5;28mprint\u001b[39m(performance)\n\u001b[1;32m 16\u001b[0m result_list\u001b[38;5;241m.\u001b[39mappend(df)\n",
226
+ "Cell \u001b[0;32mIn[59], line 14\u001b[0m, in \u001b[0;36mperf_measure\u001b[0;34m(df)\u001b[0m\n\u001b[1;32m 12\u001b[0m correct_perc \u001b[38;5;241m=\u001b[39m num_correct\u001b[38;5;241m/\u001b[39m\u001b[38;5;28mlen\u001b[39m(true_class)\n\u001b[1;32m 13\u001b[0m num_pred \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlen\u001b[39m(pred_class)\n\u001b[0;32m---> 14\u001b[0m wrong_perc \u001b[38;5;241m=\u001b[39m \u001b[43m(\u001b[49m\u001b[43mnum_pred\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mnum_correct\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m/\u001b[39;49m\u001b[43mnum_pred\u001b[49m\n\u001b[1;32m 15\u001b[0m df_perf \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mDataFrame({\n\u001b[1;32m 16\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124maccuracy\u001b[39m\u001b[38;5;124m'\u001b[39m: [accuracy],\n\u001b[1;32m 17\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtrue_ident\u001b[39m\u001b[38;5;124m'\u001b[39m: [correct_perc],\n\u001b[1;32m 18\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfalse_pred\u001b[39m\u001b[38;5;124m'\u001b[39m: [wrong_perc]\n\u001b[1;32m 19\u001b[0m })\n\u001b[1;32m 20\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m(df_perf)\n",
227
+ "\u001b[0;31mZeroDivisionError\u001b[0m: division by zero"
228
  ]
229
  }
230
  ],
231
  "source": [
232
  "result_list = []\n",
233
  "performance = pd.DataFrame(columns=['accuracy', 'true_ident', 'false_pred'])\n",
234
+ "\n",
235
+ "start_time = time.time()\n",
236
+ "\n",
237
  "for i in range(len(trip_descriptions)):\n",
238
  " current_trip = trip_descriptions[i]\n",
239
  " current_type = trip_types[i]\n",
 
244
  " performance = pd.concat([performance, perf_measure(df)])\n",
245
  " print(performance)\n",
246
  " \n",
247
+ " result_list.append(df)\n",
248
+ "\n",
249
+ "end_time = time.time()\n",
250
+ "\n",
251
+ "elapsed_time = end_time - start_time"
252
  ]
253
  },
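The ZeroDivisionError in this cell's output comes from `perf_measure`: for the 'activities' superclass the cross-encoder model returned no label above the cut-off (the prediction printed above is `[]`), so `pred_class` is empty and `(num_pred - num_correct)/num_pred` divides by zero. One possible guard, sketched as a small helper whose name and default value are hypothetical:

```python
def safe_ratio(numerator, denominator, default=0.0):
    """Return numerator/denominator, or `default` when the denominator is zero
    (e.g. no predicted activity labels passed the cut-off)."""
    return numerator / denominator if denominator else default


# inside perf_measure, the failing line could then read:
# wrong_perc = safe_ratio(num_pred - num_correct, num_pred)
```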
254
  {
 
261
  },
262
  {
263
  "cell_type": "code",
264
+ "execution_count": 61,
265
  "id": "eb33fd31-94e6-40b5-9c36-a32effe77c01",
266
  "metadata": {},
267
+ "outputs": [
268
+ {
269
+ "ename": "IndexError",
270
+ "evalue": "list index out of range",
271
+ "output_type": "error",
272
+ "traceback": [
273
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
274
+ "\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
275
+ "Cell \u001b[0;32mIn[61], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Extract \"same_value\" column from each DataFrame\u001b[39;00m\n\u001b[1;32m 2\u001b[0m sv_columns \u001b[38;5;241m=\u001b[39m [df[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msame_value\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;28;01mfor\u001b[39;00m df \u001b[38;5;129;01min\u001b[39;00m result_list] \u001b[38;5;66;03m# 'same' needs to be changed\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m sv_columns\u001b[38;5;241m.\u001b[39minsert(\u001b[38;5;241m0\u001b[39m, \u001b[43mresult_list\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msuperclass\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[1;32m 5\u001b[0m \u001b[38;5;66;03m# Combine into a new DataFrame (columns side-by-side)\u001b[39;00m\n\u001b[1;32m 6\u001b[0m sv_df \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mconcat(sv_columns, axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\n",
276
+ "\u001b[0;31mIndexError\u001b[0m: list index out of range"
277
+ ]
278
+ }
279
+ ],
280
  "source": [
281
  "# Extract \"same_value\" column from each DataFrame\n",
282
  "sv_columns = [df['same_value'] for df in result_list] # 'same' needs to be changed\n",
 
290
  },
291
  {
292
  "cell_type": "code",
293
+ "execution_count": 62,
294
  "id": "bf7546cb-79ce-49ad-8cee-54d02239220c",
295
  "metadata": {},
296
+ "outputs": [
297
+ {
298
+ "name": "stdout",
299
+ "output_type": "stream",
300
+ "text": [
301
+ " superclass accuracy\n",
302
+ "0 activity_type 0.8\n",
303
+ "1 activities 0.0\n",
304
+ "2 climate_or_season 0.5\n",
305
+ "3 style_or_comfort 0.3\n",
306
+ "4 dress_code 0.8\n",
307
+ "5 accommodation 0.8\n",
308
+ "6 transportation 0.7\n",
309
+ "7 special_conditions 0.2\n",
310
+ "8 trip_length_days 0.6\n"
311
+ ]
312
+ }
313
+ ],
314
  "source": [
315
  "# Compute accuracy per superclass (row means of same_value matrix excluding the first column)\n",
316
  "row_means = sv_df.iloc[:, 1:].mean(axis=1)\n",
 
350
  "outputs": [],
351
  "source": [
352
  "# save results\n",
 
 
353
  "# Structure to save\n",
354
  "model_result = {\n",
355
  " 'model': model_name,\n",
356
  " 'predictions': result_list,\n",
357
  " 'performance': performance,\n",
358
  " 'perf_summary': column_means,\n",
359
+ " 'perf_superclass': df_row_means,\n",
360
+ " 'elapsed_time': elapsed_time\n",
361
  "}\n",
362
  "\n",
363
  "# File path with folder\n",
 
368
  " pickle.dump(model_result, f)"
369
  ]
370
  },
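For completeness, the save step as a self-contained sketch; the `results/` folder name and the `<model_name>_results.pkl` file naming are assumptions based on the repository layout, since the actual path string is not shown in this hunk:

```python
import os
import pickle

os.makedirs('results', exist_ok=True)                              # assumed output folder
file_path = os.path.join('results', f'{model_name}_results.pkl')   # assumed naming scheme

with open(file_path, 'wb') as f:
    pickle.dump(model_result, f)
```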
371
+ {
372
+ "cell_type": "code",
373
+ "execution_count": null,
374
+ "id": "f38d0924-30b6-43cd-9bfc-fe5b0dc80411",
375
+ "metadata": {},
376
+ "outputs": [],
377
+ "source": [
378
+ "print(elapsed_time/60)"
379
+ ]
380
+ },
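The timing added in this revision wraps the whole evaluation loop; as a stand-alone pattern (`time.perf_counter()` would be the higher-resolution alternative to `time.time()`):

```python
import time

start_time = time.time()
# ... run the classification loop over all trips ...
elapsed_time = time.time() - start_time

print(f"{elapsed_time / 60:.1f} minutes")
```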
381
  {
382
  "cell_type": "markdown",
383
  "id": "e1cbb54e-abe6-49b6-957e-0683196f3199",
 
388
  },
389
  {
390
  "cell_type": "code",
391
+ "execution_count": 54,
392
  "id": "62ca82b0-6909-4e6c-9d2c-fed87971e5b6",
393
+ "metadata": {
394
+ "scrolled": true
395
+ },
396
  "outputs": [
397
  {
398
  "name": "stdout",
399
  "output_type": "stream",
400
  "text": [
401
+ "Model: model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli\n",
402
+ "Performance Summary:\n",
403
+ "accuracy 0.522222\n",
404
+ "true_ident 0.841667\n",
405
+ "false_pred 0.572381\n",
406
+ "dtype: float64\n",
407
+ "----------------------------------------\n",
408
  "Model: model_a_facebook-bart-large-mnli\n",
409
  "Performance Summary:\n",
410
  "accuracy 0.454545\n",
 
419
  "false_pred 0.551667\n",
420
  "dtype: float64\n",
421
  "----------------------------------------\n",
422
+ "Model: model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli\n",
423
+ "Performance Summary:\n",
424
+ " superclass accuracy\n",
425
+ "0 activity_type 0.8\n",
426
+ "1 activities 0.0\n",
427
+ "2 climate_or_season 0.5\n",
428
+ "3 style_or_comfort 0.3\n",
429
+ "4 dress_code 0.8\n",
430
+ "5 accommodation 0.8\n",
431
+ "6 transportation 0.7\n",
432
+ "7 special_conditions 0.2\n",
433
+ "8 trip_length_days 0.6\n",
434
+ "----------------------------------------\n",
435
  "Model: model_a_facebook-bart-large-mnli\n",
436
  "Performance Summary:\n",
437
  " superclass accuracy\n",
 
479
  " result = pickle.load(f)\n",
480
  " all_results[model_name] = result\n",
481
  "\n",
482
+ "# Compare performance across models\n",
483
  "for model, data in all_results.items():\n",
484
  " print(f\"Model: {model}\")\n",
485
  " print(f\"Performance Summary:\\n{data['perf_summary']}\")\n",
486
  " print(\"-\" * 40)\n",
487
  "\n",
488
  "\n",
489
+ "# Compare performance across models\n",
490
  "for model, data in all_results.items():\n",
491
  " print(f\"Model: {model}\")\n",
492
  " print(f\"Performance Summary:\\n{data['perf_superclass']}\")\n",
493
  " print(\"-\" * 40)"
494
  ]
495
  },
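The comparison loop assumes every pickle has already been read into `all_results`; a sketch of that loading step, assuming the files sit in a `results/` folder and follow the `<model_name>_results.pkl` naming used above:

```python
import os
import pickle

results_dir = 'results'   # assumed folder, matching the save step
all_results = {}

for file_name in sorted(os.listdir(results_dir)):
    if not file_name.endswith('_results.pkl'):
        continue
    model_name = file_name[:-len('_results.pkl')]
    with open(os.path.join(results_dir, file_name), 'rb') as f:
        all_results[model_name] = pickle.load(f)
```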
496
+ {
497
+ "cell_type": "code",
498
+ "execution_count": 69,
499
+ "id": "57fd150d-1cda-4be5-806b-ef380469243a",
500
+ "metadata": {},
501
+ "outputs": [
502
+ {
503
+ "name": "stdout",
504
+ "output_type": "stream",
505
+ "text": [
506
+ "Model: model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli\n",
507
+ "Time in minutes for 10 trips:\n",
508
+ "83.45150986512502\n",
509
+ "----------------------------------------\n",
510
+ "Model: model_a_facebook-bart-large-mnli\n"
511
+ ]
512
+ },
513
+ {
514
+ "ename": "KeyError",
515
+ "evalue": "'elapsed_time'",
516
+ "output_type": "error",
517
+ "traceback": [
518
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
519
+ "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
520
+ "Cell \u001b[0;32mIn[69], line 4\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m model, data \u001b[38;5;129;01min\u001b[39;00m all_results\u001b[38;5;241m.\u001b[39mitems():\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mModel: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m----> 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTime in minutes for 10 trips:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mdata[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124melapsed_time\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;241m/\u001b[39m\u001b[38;5;241m60\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m-\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m40\u001b[39m)\n",
521
+ "\u001b[0;31mKeyError\u001b[0m: 'elapsed_time'"
522
+ ]
523
+ }
524
+ ],
525
+ "source": [
526
+ "# Compare across models\n",
527
+ "for model, data in all_results.items():\n",
528
+ " print(f\"Model: {model}\")\n",
529
+ " print(f\"Time in minutes for 10 trips:\\n{data['elapsed_time']/60}\")\n",
530
+ " print(\"-\" * 40)"
531
+ ]
532
+ },
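The KeyError shows up because result files written before the timing was introduced have no 'elapsed_time' entry; `dict.get()` with a fallback keeps the comparison loop running across old and new pickles:

```python
for model, data in all_results.items():
    print(f"Model: {model}")
    elapsed = data.get('elapsed_time')          # None for result files saved without timing
    if elapsed is None:
        print("Time in minutes for 10 trips: not recorded")
    else:
        print(f"Time in minutes for 10 trips: {elapsed / 60:.2f}")
    print("-" * 40)
```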
533
  {
534
  "cell_type": "markdown",
535
  "id": "17483df4-55c4-41cd-b8a9-61f7a5c7e8a3",
space/gradio_tryout.ipynb CHANGED
@@ -19,12 +19,12 @@
19
  "id": "3bbee2e4-55c8-4b06-9929-72026edf7932",
20
  "metadata": {},
21
  "source": [
22
- "**Load and try the model**"
23
  ]
24
  },
25
  {
26
  "cell_type": "code",
27
- "execution_count": 40,
28
  "id": "f8c28d2d-8458-49fd-8ebf-5e729d6e861f",
29
  "metadata": {},
30
  "outputs": [
@@ -57,8 +57,11 @@
57
  "import time\n",
58
  "\n",
59
  "# Load the model and create a pipeline for zero-shot classification (1min loading + classifying with 89 labels)\n",
60
- "classifier = pipeline(\"zero-shot-classification\", model=\"MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli\")\n",
 
61
  "# tried:\n",
 
 
62
  "# facebook/bart-large-mnli\n",
63
  "# sileod/deberta-v3-base-tasksource-nli\n",
64
  "\n",
@@ -85,7 +88,7 @@
85
  },
86
  {
87
  "cell_type": "code",
88
- "execution_count": 41,
89
  "id": "3a762755-872d-43a6-b666-874d6133488c",
90
  "metadata": {},
91
  "outputs": [],
@@ -115,7 +118,7 @@
115
  },
116
  {
117
  "cell_type": "code",
118
- "execution_count": 42,
119
  "id": "3b4f3193-3bdd-453c-8664-df84f955600c",
120
  "metadata": {},
121
  "outputs": [],
@@ -152,60 +155,53 @@
152
  },
153
  {
154
  "cell_type": "code",
155
- "execution_count": 43,
156
  "id": "4dd01755-be8d-4904-8494-ac28aba2fee7",
157
  "metadata": {
158
  "scrolled": true
159
  },
160
  "outputs": [
161
- {
162
- "name": "stderr",
163
- "output_type": "stream",
164
- "text": [
165
- "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
166
- ]
167
- },
168
  {
169
  "name": "stdout",
170
  "output_type": "stream",
171
  "text": [
172
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['beach vacation', 'micro-adventure / weekend trip', 'cultural exploration', 'nature escape', 'digital nomad trip', 'camping trip (campground)', 'camping trip (wild camping)', 'long-distance hike / thru-hike', 'ski tour / skitour', 'hut trek (summer)', 'city trip', 'hut trek (winter)', 'road trip (car/camper)', 'festival trip', 'yoga / wellness retreat', 'snowboard / splitboard trip'], 'scores': [0.37198853492736816, 0.31496119499206543, 0.10890532284975052, 0.09102731198072433, 0.0735681876540184, 0.012933704070746899, 0.009422042407095432, 0.0051276967860758305, 0.004056071396917105, 0.0017408831045031548, 0.001503779087215662, 0.0014244643971323967, 0.0013752576196566224, 0.0009292717440985143, 0.0006881792796775699, 0.0003480584127828479]}\n",
173
- "beach vacation\n",
174
  "0\n",
175
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['relaxing', 'hiking', 'going to the beach', 'photography', 'sightseeing', 'hut-to-hut hiking', 'snorkeling', 'snowshoe hiking', 'yoga', 'stand-up paddleboarding (SUP)', 'kayaking / canoeing', 'horseback riding', 'swimming', 'paragliding', 'rafting', 'biking', 'rock climbing', 'surfing', 'running', 'ice climbing', 'cross-country skiing', 'fishing', 'ski touring', 'skiing', 'scuba diving'], 'scores': [0.9943736791610718, 0.9631249308586121, 0.9454535841941833, 0.7538902759552002, 0.4525446593761444, 0.1696157604455948, 0.05957728251814842, 0.04234873503446579, 0.01991761103272438, 0.016971556469798088, 0.006959819234907627, 0.00411367928609252, 0.0030609173700213432, 0.00186573073733598, 0.0017515394138172269, 0.00142807571683079, 0.0005748369731009007, 0.00037779140984639525, 0.0003097739245276898, 0.00030914091621525586, 0.0002725012309383601, 0.00027050732751376927, 0.00024376016517635435, 0.00017392759036738425, 0.00014787293912377208]}\n",
176
- "['relaxing', 'hiking', 'going to the beach', 'photography']\n",
177
  "1\n",
178
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['tropical / humid', 'warm destination / summer', 'variable weather / spring / autumn', 'cold destination / winter', 'dry / desert-like'], 'scores': [0.4895477890968323, 0.25917261838912964, 0.24829530715942383, 0.0017174285603687167, 0.0012668712297454476]}\n",
179
- "tropical / humid\n",
180
  "2\n",
181
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['minimalist', 'luxury (including evening wear)', 'lightweight (but comfortable)', 'ultralight'], 'scores': [0.7574900984764099, 0.09964746236801147, 0.07804173231124878, 0.06482075154781342]}\n",
182
  "minimalist\n",
183
  "3\n",
184
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['casual', 'conservative', 'formal (business trip)'], 'scores': [0.8163393139839172, 0.11898067593574524, 0.06467998772859573]}\n",
185
  "casual\n",
186
  "4\n",
187
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['indoor', 'huts with half board', 'sleeping in a car', 'sleeping in a tent'], 'scores': [0.6389047503471375, 0.18624886870384216, 0.13902997970581055, 0.03581654652953148]}\n",
188
  "indoor\n",
189
  "5\n",
190
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['no own vehicle', 'own vehicle'], 'scores': [0.9990958571434021, 0.0009041387238539755]}\n",
191
  "no own vehicle\n",
192
  "6\n",
193
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['off-grid / no electricity', 'self-supported (bring your own food/cooking)', 'child-friendly', 'no special conditions', 'pet-friendly', 'rainy climate', 'avalanche-prone terrain', 'high alpine terrain', 'snow and ice'], 'scores': [0.7414510250091553, 0.07683143764734268, 0.055722303688526154, 0.054133761674165726, 0.04852374270558357, 0.006977608893066645, 0.005693929269909859, 0.005599685944616795, 0.005066512618213892]}\n",
194
- "off-grid / no electricity\n",
195
  "7\n",
196
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['7+ days', '2 days', '7 days', '3 days', '4 days', '6 days', '5 days', '1 day'], 'scores': [0.29225289821624756, 0.20232954621315002, 0.1837582290172577, 0.13940994441509247, 0.06562349200248718, 0.04916509613394737, 0.040249694138765335, 0.0272111464291811]}\n",
197
  "7+ days\n",
198
  "8\n",
199
- " superclass pred_class \\\n",
200
- "0 activity_type beach vacation \n",
201
- "1 activities [relaxing, hiking, going to the beach, photogr... \n",
202
- "2 climate_or_season tropical / humid \n",
203
- "3 style_or_comfort minimalist \n",
204
- "4 dress_code casual \n",
205
- "5 accommodation indoor \n",
206
- "6 transportation no own vehicle \n",
207
- "7 special_conditions off-grid / no electricity \n",
208
- "8 trip_length_days 7+ days \n",
209
  "\n",
210
  " true_class \n",
211
  "0 beach vacation \n",
@@ -216,51 +212,19 @@
216
  "5 indoor \n",
217
  "6 no own vehicle \n",
218
  "7 no special conditions \n",
219
- "8 7+ days \n",
220
- " accuracy true_ident false_pred\n",
221
- "0 0.555556 0.75 0.25\n",
222
- "{'sequence': 'We are a couple in our thirties traveling to Vienna for a three-day city trip. We’ll be staying at a friend’s house and plan to explore the city by sightseeing, strolling through the streets, visiting markets, and trying out great restaurants and cafés. We also hope to attend a classical music concert. Our journey to Vienna will be by train.', 'labels': ['city trip', 'cultural exploration', 'micro-adventure / weekend trip', 'ski tour / skitour', 'festival trip', 'digital nomad trip', 'hut trek (winter)', 'camping trip (campground)', 'long-distance hike / thru-hike', 'hut trek (summer)', 'nature escape', 'camping trip (wild camping)', 'yoga / wellness retreat', 'road trip (car/camper)', 'beach vacation', 'snowboard / splitboard trip'], 'scores': [0.517789363861084, 0.297355592250824, 0.1621870994567871, 0.006185388192534447, 0.005294559057801962, 0.002764208009466529, 0.001503965351730585, 0.0014866390265524387, 0.0012240204960107803, 0.0012071850942447782, 0.000757778063416481, 0.0006650012801401317, 0.0005547589971683919, 0.00043604226084426045, 0.00031738984398543835, 0.0002710542466957122]}\n",
223
- "city trip\n",
224
- "0\n"
225
  ]
226
  },
227
  {
228
- "ename": "KeyboardInterrupt",
229
- "evalue": "",
230
  "output_type": "error",
231
  "traceback": [
232
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
233
- "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
234
- "Cell \u001b[0;32mIn[43], line 7\u001b[0m\n\u001b[1;32m 5\u001b[0m current_trip \u001b[38;5;241m=\u001b[39m trip_descriptions[i]\n\u001b[1;32m 6\u001b[0m current_type \u001b[38;5;241m=\u001b[39m trip_types[i]\n\u001b[0;32m----> 7\u001b[0m df \u001b[38;5;241m=\u001b[39m \u001b[43mpred_trip\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcurrent_trip\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcurrent_type\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcut_off\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0.5\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28mprint\u001b[39m(df)\n\u001b[1;32m 10\u001b[0m \u001b[38;5;66;03m# accuracy, perc true classes identified and perc wrong pred classes\u001b[39;00m\n",
235
- "Cell \u001b[0;32mIn[41], line 10\u001b[0m, in \u001b[0;36mpred_trip\u001b[0;34m(trip_descr, trip_type, cut_off)\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, key \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(keys_list):\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m key \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mactivities\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[0;32m---> 10\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mclassifier\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrip_descr\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcandidate_labels\u001b[49m\u001b[43m[\u001b[49m\u001b[43mkey\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmulti_label\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m 11\u001b[0m indices \u001b[38;5;241m=\u001b[39m [i \u001b[38;5;28;01mfor\u001b[39;00m i, score \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(result[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mscores\u001b[39m\u001b[38;5;124m'\u001b[39m]) \u001b[38;5;28;01mif\u001b[39;00m score \u001b[38;5;241m>\u001b[39m cut_off]\n\u001b[1;32m 12\u001b[0m classes \u001b[38;5;241m=\u001b[39m [result[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mlabels\u001b[39m\u001b[38;5;124m'\u001b[39m][i] \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m indices]\n",
236
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/zero_shot_classification.py:206\u001b[0m, in \u001b[0;36mZeroShotClassificationPipeline.__call__\u001b[0;34m(self, sequences, *args, **kwargs)\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 204\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUnable to understand extra arguments \u001b[39m\u001b[38;5;132;01m{\u001b[39;00margs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 206\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__call__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43msequences\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
237
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/base.py:1294\u001b[0m, in \u001b[0;36mPipeline.__call__\u001b[0;34m(self, inputs, num_workers, batch_size, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1292\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39miterate(inputs, preprocess_params, forward_params, postprocess_params)\n\u001b[1;32m 1293\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mframework \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m, ChunkPipeline):\n\u001b[0;32m-> 1294\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1295\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43miter\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1296\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_iterator\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1297\u001b[0m \u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_workers\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpreprocess_params\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mforward_params\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpostprocess_params\u001b[49m\n\u001b[1;32m 1298\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1299\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1300\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1301\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1302\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrun_single(inputs, preprocess_params, forward_params, postprocess_params)\n",
238
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/pt_utils.py:124\u001b[0m, in \u001b[0;36mPipelineIterator.__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 121\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloader_batch_item()\n\u001b[1;32m 123\u001b[0m \u001b[38;5;66;03m# We're out of items within a batch\u001b[39;00m\n\u001b[0;32m--> 124\u001b[0m item \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43miterator\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 125\u001b[0m processed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfer(item, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mparams)\n\u001b[1;32m 126\u001b[0m \u001b[38;5;66;03m# We now have a batch of \"inferred things\".\u001b[39;00m\n",
239
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/pt_utils.py:269\u001b[0m, in \u001b[0;36mPipelinePackIterator.__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 266\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m accumulator\n\u001b[1;32m 268\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_last:\n\u001b[0;32m--> 269\u001b[0m processed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minfer\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43miterator\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparams\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 270\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloader_batch_size \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 271\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(processed, torch\u001b[38;5;241m.\u001b[39mTensor):\n",
240
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/base.py:1209\u001b[0m, in \u001b[0;36mPipeline.forward\u001b[0;34m(self, model_inputs, **forward_params)\u001b[0m\n\u001b[1;32m 1207\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m inference_context():\n\u001b[1;32m 1208\u001b[0m model_inputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_ensure_tensor_on_device(model_inputs, device\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdevice)\n\u001b[0;32m-> 1209\u001b[0m model_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mforward_params\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1210\u001b[0m model_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_ensure_tensor_on_device(model_outputs, device\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mdevice(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcpu\u001b[39m\u001b[38;5;124m\"\u001b[39m))\n\u001b[1;32m 1211\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n",
241
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/zero_shot_classification.py:229\u001b[0m, in \u001b[0;36mZeroShotClassificationPipeline._forward\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 227\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muse_cache\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m inspect\u001b[38;5;241m.\u001b[39msignature(model_forward)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mkeys():\n\u001b[1;32m 228\u001b[0m model_inputs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muse_cache\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[0;32m--> 229\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 231\u001b[0m model_outputs \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 232\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcandidate_label\u001b[39m\u001b[38;5;124m\"\u001b[39m: candidate_label,\n\u001b[1;32m 233\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msequence\u001b[39m\u001b[38;5;124m\"\u001b[39m: sequence,\n\u001b[1;32m 234\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mis_last\u001b[39m\u001b[38;5;124m\"\u001b[39m: inputs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mis_last\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[1;32m 235\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39moutputs,\n\u001b[1;32m 236\u001b[0m }\n\u001b[1;32m 237\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m model_outputs\n",
242
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
243
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
244
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:1297\u001b[0m, in \u001b[0;36mDebertaV2ForSequenceClassification.forward\u001b[0;34m(self, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, labels, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 1289\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1290\u001b[0m \u001b[38;5;124;03mlabels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\u001b[39;00m\n\u001b[1;32m 1291\u001b[0m \u001b[38;5;124;03m Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\u001b[39;00m\n\u001b[1;32m 1292\u001b[0m \u001b[38;5;124;03m config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\u001b[39;00m\n\u001b[1;32m 1293\u001b[0m \u001b[38;5;124;03m `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\u001b[39;00m\n\u001b[1;32m 1294\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1295\u001b[0m return_dict \u001b[38;5;241m=\u001b[39m return_dict \u001b[38;5;28;01mif\u001b[39;00m return_dict \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39muse_return_dict\n\u001b[0;32m-> 1297\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdeberta\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1298\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1299\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken_type_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken_type_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1300\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1301\u001b[0m \u001b[43m \u001b[49m\u001b[43mposition_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mposition_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1302\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs_embeds\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs_embeds\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1303\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1304\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1305\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1306\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1308\u001b[0m encoder_layer \u001b[38;5;241m=\u001b[39m outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 1309\u001b[0m pooled_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpooler(encoder_layer)\n",
245
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
246
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
247
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:1063\u001b[0m, in \u001b[0;36mDebertaV2Model.forward\u001b[0;34m(self, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 1053\u001b[0m token_type_ids \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mzeros(input_shape, dtype\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mlong, device\u001b[38;5;241m=\u001b[39mdevice)\n\u001b[1;32m 1055\u001b[0m embedding_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39membeddings(\n\u001b[1;32m 1056\u001b[0m input_ids\u001b[38;5;241m=\u001b[39minput_ids,\n\u001b[1;32m 1057\u001b[0m token_type_ids\u001b[38;5;241m=\u001b[39mtoken_type_ids,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1060\u001b[0m inputs_embeds\u001b[38;5;241m=\u001b[39minputs_embeds,\n\u001b[1;32m 1061\u001b[0m )\n\u001b[0;32m-> 1063\u001b[0m encoder_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mencoder\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1064\u001b[0m \u001b[43m \u001b[49m\u001b[43membedding_output\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1065\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1066\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 1067\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1068\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1069\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1070\u001b[0m encoded_layers \u001b[38;5;241m=\u001b[39m encoder_outputs[\u001b[38;5;241m1\u001b[39m]\n\u001b[1;32m 1072\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mz_steps \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m1\u001b[39m:\n",
248
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
249
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
250
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:507\u001b[0m, in \u001b[0;36mDebertaV2Encoder.forward\u001b[0;34m(self, hidden_states, attention_mask, output_hidden_states, output_attentions, query_states, relative_pos, return_dict)\u001b[0m\n\u001b[1;32m 497\u001b[0m output_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_gradient_checkpointing_func(\n\u001b[1;32m 498\u001b[0m layer_module\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__call__\u001b[39m,\n\u001b[1;32m 499\u001b[0m next_kv,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 504\u001b[0m output_attentions,\n\u001b[1;32m 505\u001b[0m )\n\u001b[1;32m 506\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 507\u001b[0m output_states \u001b[38;5;241m=\u001b[39m \u001b[43mlayer_module\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 508\u001b[0m \u001b[43m \u001b[49m\u001b[43mnext_kv\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 509\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 510\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 511\u001b[0m \u001b[43m \u001b[49m\u001b[43mrelative_pos\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrelative_pos\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 512\u001b[0m \u001b[43m \u001b[49m\u001b[43mrel_embeddings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrel_embeddings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 513\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 514\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 516\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m output_attentions:\n\u001b[1;32m 517\u001b[0m output_states, att_m \u001b[38;5;241m=\u001b[39m output_states\n",
251
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
252
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
253
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:355\u001b[0m, in \u001b[0;36mDebertaV2Layer.forward\u001b[0;34m(self, hidden_states, attention_mask, query_states, relative_pos, rel_embeddings, output_attentions)\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\n\u001b[1;32m 347\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 348\u001b[0m hidden_states,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 353\u001b[0m output_attentions\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 354\u001b[0m ):\n\u001b[0;32m--> 355\u001b[0m attention_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mattention\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 356\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 357\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 358\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 359\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 360\u001b[0m \u001b[43m \u001b[49m\u001b[43mrelative_pos\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrelative_pos\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 361\u001b[0m \u001b[43m \u001b[49m\u001b[43mrel_embeddings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrel_embeddings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 362\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 363\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m output_attentions:\n\u001b[1;32m 364\u001b[0m attention_output, att_matrix \u001b[38;5;241m=\u001b[39m attention_output\n",
254
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
255
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
256
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:286\u001b[0m, in \u001b[0;36mDebertaV2Attention.forward\u001b[0;34m(self, hidden_states, attention_mask, output_attentions, query_states, relative_pos, rel_embeddings)\u001b[0m\n\u001b[1;32m 277\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\n\u001b[1;32m 278\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 279\u001b[0m hidden_states,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 284\u001b[0m rel_embeddings\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 285\u001b[0m ):\n\u001b[0;32m--> 286\u001b[0m self_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mself\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 287\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 288\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 289\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 290\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 291\u001b[0m \u001b[43m \u001b[49m\u001b[43mrelative_pos\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrelative_pos\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 292\u001b[0m \u001b[43m \u001b[49m\u001b[43mrel_embeddings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrel_embeddings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 293\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 294\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m output_attentions:\n\u001b[1;32m 295\u001b[0m self_output, att_matrix \u001b[38;5;241m=\u001b[39m self_output\n",
257
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
258
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
259
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:700\u001b[0m, in \u001b[0;36mDisentangledSelfAttention.forward\u001b[0;34m(self, hidden_states, attention_mask, output_attentions, query_states, relative_pos, rel_embeddings)\u001b[0m\n\u001b[1;32m 698\u001b[0m query_states \u001b[38;5;241m=\u001b[39m hidden_states\n\u001b[1;32m 699\u001b[0m query_layer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtranspose_for_scores(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mquery_proj(query_states), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_attention_heads)\n\u001b[0;32m--> 700\u001b[0m key_layer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtranspose_for_scores(\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mkey_proj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m)\u001b[49m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_attention_heads)\n\u001b[1;32m 701\u001b[0m value_layer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtranspose_for_scores(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvalue_proj(hidden_states), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_attention_heads)\n\u001b[1;32m 703\u001b[0m rel_att \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
260
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
261
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
262
- "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/linear.py:116\u001b[0m, in \u001b[0;36mLinear.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlinear\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
263
- "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
264
  ]
265
  }
266
  ],
@@ -269,6 +233,7 @@
269
  "performance = pd.DataFrame(columns=['accuracy', 'true_ident', 'false_pred'])\n",
270
  "\n",
271
  "start_time = time.time()\n",
 
272
  "for i in range(len(trip_descriptions)):\n",
273
  " current_trip = trip_descriptions[i]\n",
274
  " current_type = trip_types[i]\n",
@@ -279,7 +244,11 @@
279
  " performance = pd.concat([performance, perf_measure(df)])\n",
280
  " print(performance)\n",
281
  " \n",
282
- " result_list.append(df)"
 
 
 
 
283
  ]
284
  },
285
  {
@@ -292,10 +261,22 @@
292
  },
293
  {
294
  "cell_type": "code",
295
- "execution_count": null,
296
  "id": "eb33fd31-94e6-40b5-9c36-a32effe77c01",
297
  "metadata": {},
298
- "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
299
  "source": [
300
  "# Extract \"same_value\" column from each DataFrame\n",
301
  "sv_columns = [df['same_value'] for df in result_list] # 'same' needs to be changed\n",
@@ -309,10 +290,27 @@
309
  },
310
  {
311
  "cell_type": "code",
312
- "execution_count": null,
313
  "id": "bf7546cb-79ce-49ad-8cee-54d02239220c",
314
  "metadata": {},
315
- "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
316
  "source": [
317
  "# Compute accuracy per superclass (row means of same_value matrix excluding the first column)\n",
318
  "row_means = sv_df.iloc[:, 1:].mean(axis=1)\n",
@@ -352,15 +350,14 @@
352
  "outputs": [],
353
  "source": [
354
  "# save results\n",
355
- "# Example data for one model\n",
356
- "model_name = 'model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli'\n",
357
  "# Structure to save\n",
358
  "model_result = {\n",
359
  " 'model': model_name,\n",
360
  " 'predictions': result_list,\n",
361
  " 'performance': performance,\n",
362
  " 'perf_summary': column_means,\n",
363
- " 'perf_superclass': df_row_means\n",
 
364
  "}\n",
365
  "\n",
366
  "# File path with folder\n",
@@ -371,6 +368,16 @@
371
  " pickle.dump(model_result, f)"
372
  ]
373
  },
 
374
  {
375
  "cell_type": "markdown",
376
  "id": "e1cbb54e-abe6-49b6-957e-0683196f3199",
@@ -381,14 +388,23 @@
381
  },
382
  {
383
  "cell_type": "code",
384
- "execution_count": 35,
385
  "id": "62ca82b0-6909-4e6c-9d2c-fed87971e5b6",
386
- "metadata": {},
 
 
387
  "outputs": [
388
  {
389
  "name": "stdout",
390
  "output_type": "stream",
391
  "text": [
 
 
 
 
 
 
 
392
  "Model: model_a_facebook-bart-large-mnli\n",
393
  "Performance Summary:\n",
394
  "accuracy 0.454545\n",
@@ -403,6 +419,19 @@
403
  "false_pred 0.551667\n",
404
  "dtype: float64\n",
405
  "----------------------------------------\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
406
  "Model: model_a_facebook-bart-large-mnli\n",
407
  "Performance Summary:\n",
408
  " superclass accuracy\n",
@@ -450,20 +479,57 @@
450
  " result = pickle.load(f)\n",
451
  " all_results[model_name] = result\n",
452
  "\n",
453
- "# Now you can compare performance across models\n",
454
  "for model, data in all_results.items():\n",
455
  " print(f\"Model: {model}\")\n",
456
  " print(f\"Performance Summary:\\n{data['perf_summary']}\")\n",
457
  " print(\"-\" * 40)\n",
458
  "\n",
459
  "\n",
460
- "# Now you can compare performance across models\n",
461
  "for model, data in all_results.items():\n",
462
  " print(f\"Model: {model}\")\n",
463
  " print(f\"Performance Summary:\\n{data['perf_superclass']}\")\n",
464
  " print(\"-\" * 40)"
465
  ]
466
  },
 
 
467
  {
468
  "cell_type": "markdown",
469
  "id": "17483df4-55c4-41cd-b8a9-61f7a5c7e8a3",
 
19
  "id": "3bbee2e4-55c8-4b06-9929-72026edf7932",
20
  "metadata": {},
21
  "source": [
22
+ "**Load prerequisites**"
23
  ]
24
  },
25
  {
26
  "cell_type": "code",
27
+ "execution_count": 57,
28
  "id": "f8c28d2d-8458-49fd-8ebf-5e729d6e861f",
29
  "metadata": {},
30
  "outputs": [
 
57
  "import time\n",
58
  "\n",
59
  "# Load the model and create a pipeline for zero-shot classification (1min loading + classifying with 89 labels)\n",
60
+ "classifier = pipeline(\"zero-shot-classification\", model=\"cross-encoder/nli-deberta-v3-base\")\n",
61
+ "model_name = 'model_cross-encoder-nli-deberta-v3-base'\n",
62
  "# tried:\n",
63
+ "# cross-encoder/nli-deberta-v3-large gave error\n",
64
+ "# MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli\n",
65
  "# facebook/bart-large-mnli\n",
66
  "# sileod/deberta-v3-base-tasksource-nli\n",
67
  "\n",
 
88
  },
89
  {
90
  "cell_type": "code",
91
+ "execution_count": 58,
92
  "id": "3a762755-872d-43a6-b666-874d6133488c",
93
  "metadata": {},
94
  "outputs": [],
 
118
  },
119
  {
120
  "cell_type": "code",
121
+ "execution_count": 59,
122
  "id": "3b4f3193-3bdd-453c-8664-df84f955600c",
123
  "metadata": {},
124
  "outputs": [],
 
155
  },
156
  {
157
  "cell_type": "code",
158
+ "execution_count": 60,
159
  "id": "4dd01755-be8d-4904-8494-ac28aba2fee7",
160
  "metadata": {
161
  "scrolled": true
162
  },
163
  "outputs": [
 
 
 
 
 
 
 
164
  {
165
  "name": "stdout",
166
  "output_type": "stream",
167
  "text": [
168
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['micro-adventure / weekend trip', 'digital nomad trip', 'beach vacation', 'festival trip', 'city trip', 'cultural exploration', 'road trip (car/camper)', 'camping trip (wild camping)', 'long-distance hike / thru-hike', 'hut trek (winter)', 'ski tour / skitour', 'snowboard / splitboard trip', 'nature escape', 'yoga / wellness retreat', 'hut trek (summer)', 'camping trip (campground)'], 'scores': [0.9722680449485779, 0.007802918087691069, 0.0075571718625724316, 0.0022959215566515923, 0.0021305829286575317, 0.001222927705384791, 0.0009879637509584427, 0.000805296644102782, 0.0007946204277686775, 0.0007107199053280056, 0.0007009899127297103, 0.0006353880744427443, 0.0005838185315951705, 0.0005424902774393559, 0.0004807499353773892, 0.0004804217896889895]}\n",
169
+ "micro-adventure / weekend trip\n",
170
  "0\n",
171
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['going to the beach', 'sightseeing', 'relaxing', 'hiking', 'hut-to-hut hiking', 'stand-up paddleboarding (SUP)', 'photography', 'biking', 'running', 'ski touring', 'snowshoe hiking', 'yoga', 'kayaking / canoeing', 'horseback riding', 'rafting', 'paragliding', 'cross-country skiing', 'surfing', 'skiing', 'ice climbing', 'fishing', 'snorkeling', 'swimming', 'rock climbing', 'scuba diving'], 'scores': [0.4660525321960449, 0.007281942293047905, 0.003730606520548463, 0.0001860307966126129, 0.00014064949937164783, 0.00011034693307010457, 5.2949126256862655e-05, 3.828677654382773e-05, 3.396756437723525e-05, 1.5346524378401227e-05, 9.348185812996235e-06, 8.182429155567661e-06, 6.5973340497293975e-06, 6.271920938161202e-06, 5.544673058466287e-06, 5.299102667777333e-06, 4.855380211665761e-06, 4.506250661506783e-06, 3.949530764657538e-06, 3.730233856913401e-06, 3.297281637060223e-06, 3.0508665531669976e-06, 2.933618134193239e-06, 2.6379277642263332e-06, 2.2992651338427095e-06]}\n",
172
+ "[]\n",
173
  "1\n",
174
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['variable weather / spring / autumn', 'warm destination / summer', 'cold destination / winter', 'dry / desert-like', 'tropical / humid'], 'scores': [0.5934922695159912, 0.17430798709392548, 0.10943299531936646, 0.07068652659654617, 0.05208020657300949]}\n",
175
+ "variable weather / spring / autumn\n",
176
  "2\n",
177
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['minimalist', 'ultralight', 'luxury (including evening wear)', 'lightweight (but comfortable)'], 'scores': [0.6965053081512451, 0.11270010471343994, 0.10676420480012894, 0.08403033763170242]}\n",
178
  "minimalist\n",
179
  "3\n",
180
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['casual', 'formal (business trip)', 'conservative'], 'scores': [0.6362482309341431, 0.22082458436489105, 0.14292724430561066]}\n",
181
  "casual\n",
182
  "4\n",
183
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['indoor', 'sleeping in a tent', 'huts with half board', 'sleeping in a car'], 'scores': [0.435793399810791, 0.20242486894130707, 0.19281964004039764, 0.16896207630634308]}\n",
184
  "indoor\n",
185
  "5\n",
186
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['no own vehicle', 'own vehicle'], 'scores': [0.9987181425094604, 0.0012818538816645741]}\n",
187
  "no own vehicle\n",
188
  "6\n",
189
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['self-supported (bring your own food/cooking)', 'no special conditions', 'off-grid / no electricity', 'rainy climate', 'child-friendly', 'snow and ice', 'pet-friendly', 'high alpine terrain', 'avalanche-prone terrain'], 'scores': [0.1984991431236267, 0.1695038080215454, 0.16221018135547638, 0.13200421631336212, 0.12101645022630692, 0.10550825297832489, 0.042406272143125534, 0.03797775134444237, 0.030873913317918777]}\n",
190
+ "self-supported (bring your own food/cooking)\n",
191
  "7\n",
192
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['7+ days', '2 days', '1 day', '7 days', '5 days', '3 days', '6 days', '4 days'], 'scores': [0.4730822443962097, 0.1168912723660469, 0.10058756172657013, 0.0991850346326828, 0.05424537882208824, 0.053677864372730255, 0.051554784178733826, 0.050775907933712006]}\n",
193
  "7+ days\n",
194
  "8\n",
195
+ " superclass pred_class \\\n",
196
+ "0 activity_type micro-adventure / weekend trip \n",
197
+ "1 activities [] \n",
198
+ "2 climate_or_season variable weather / spring / autumn \n",
199
+ "3 style_or_comfort minimalist \n",
200
+ "4 dress_code casual \n",
201
+ "5 accommodation indoor \n",
202
+ "6 transportation no own vehicle \n",
203
+ "7 special_conditions self-supported (bring your own food/cooking) \n",
204
+ "8 trip_length_days 7+ days \n",
205
  "\n",
206
  " true_class \n",
207
  "0 beach vacation \n",
 
212
  "5 indoor \n",
213
  "6 no own vehicle \n",
214
  "7 no special conditions \n",
215
+ "8 7+ days \n"
 
 
 
 
 
216
  ]
217
  },
218
  {
219
+ "ename": "ZeroDivisionError",
220
+ "evalue": "division by zero",
221
  "output_type": "error",
222
  "traceback": [
223
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
224
+ "\u001b[0;31mZeroDivisionError\u001b[0m Traceback (most recent call last)",
225
+ "Cell \u001b[0;32mIn[60], line 13\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28mprint\u001b[39m(df)\n\u001b[1;32m 12\u001b[0m \u001b[38;5;66;03m# accuracy, perc true classes identified and perc wrong pred classes\u001b[39;00m\n\u001b[0;32m---> 13\u001b[0m performance \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mconcat([performance, \u001b[43mperf_measure\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdf\u001b[49m\u001b[43m)\u001b[49m])\n\u001b[1;32m 14\u001b[0m \u001b[38;5;28mprint\u001b[39m(performance)\n\u001b[1;32m 16\u001b[0m result_list\u001b[38;5;241m.\u001b[39mappend(df)\n",
226
+ "Cell \u001b[0;32mIn[59], line 14\u001b[0m, in \u001b[0;36mperf_measure\u001b[0;34m(df)\u001b[0m\n\u001b[1;32m 12\u001b[0m correct_perc \u001b[38;5;241m=\u001b[39m num_correct\u001b[38;5;241m/\u001b[39m\u001b[38;5;28mlen\u001b[39m(true_class)\n\u001b[1;32m 13\u001b[0m num_pred \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlen\u001b[39m(pred_class)\n\u001b[0;32m---> 14\u001b[0m wrong_perc \u001b[38;5;241m=\u001b[39m \u001b[43m(\u001b[49m\u001b[43mnum_pred\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mnum_correct\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m/\u001b[39;49m\u001b[43mnum_pred\u001b[49m\n\u001b[1;32m 15\u001b[0m df_perf \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mDataFrame({\n\u001b[1;32m 16\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124maccuracy\u001b[39m\u001b[38;5;124m'\u001b[39m: [accuracy],\n\u001b[1;32m 17\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtrue_ident\u001b[39m\u001b[38;5;124m'\u001b[39m: [correct_perc],\n\u001b[1;32m 18\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfalse_pred\u001b[39m\u001b[38;5;124m'\u001b[39m: [wrong_perc]\n\u001b[1;32m 19\u001b[0m })\n\u001b[1;32m 20\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m(df_perf)\n",
227
+ "\u001b[0;31mZeroDivisionError\u001b[0m: division by zero"
 
 
 
 
228
  ]
229
  }
230
  ],
 
233
  "performance = pd.DataFrame(columns=['accuracy', 'true_ident', 'false_pred'])\n",
234
  "\n",
235
  "start_time = time.time()\n",
236
+ "\n",
237
  "for i in range(len(trip_descriptions)):\n",
238
  " current_trip = trip_descriptions[i]\n",
239
  " current_type = trip_types[i]\n",
 
244
  " performance = pd.concat([performance, perf_measure(df)])\n",
245
  " print(performance)\n",
246
  " \n",
247
+ " result_list.append(df)\n",
248
+ "\n",
249
+ "end_time = time.time()\n",
250
+ "\n",
251
+ "elapsed_time = end_time - start_time"
252
  ]
253
  },
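The ZeroDivisionError in the output above comes from perf_measure: for the 'activities' superclass the classifier returned an empty prediction list, so num_pred is 0 and wrong_perc = (num_pred - num_correct)/num_pred divides by zero. A minimal guard is sketched below; the helper name is hypothetical and only mirrors the fragment of perf_measure visible in the traceback.

    def safe_wrong_perc(num_correct, num_pred):
        # Hypothetical guard, not in the notebook: with no predicted labels,
        # report 0.0 false predictions instead of dividing by zero.
        if num_pred == 0:
            return 0.0
        return (num_pred - num_correct) / num_pred

    print(safe_wrong_perc(0, 0))  # 0.0 instead of a ZeroDivisionError
    print(safe_wrong_perc(2, 3))  # 0.33...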
254
  {
 
261
  },
262
  {
263
  "cell_type": "code",
264
+ "execution_count": 61,
265
  "id": "eb33fd31-94e6-40b5-9c36-a32effe77c01",
266
  "metadata": {},
267
+ "outputs": [
268
+ {
269
+ "ename": "IndexError",
270
+ "evalue": "list index out of range",
271
+ "output_type": "error",
272
+ "traceback": [
273
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
274
+ "\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
275
+ "Cell \u001b[0;32mIn[61], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Extract \"same_value\" column from each DataFrame\u001b[39;00m\n\u001b[1;32m 2\u001b[0m sv_columns \u001b[38;5;241m=\u001b[39m [df[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msame_value\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;28;01mfor\u001b[39;00m df \u001b[38;5;129;01min\u001b[39;00m result_list] \u001b[38;5;66;03m# 'same' needs to be changed\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m sv_columns\u001b[38;5;241m.\u001b[39minsert(\u001b[38;5;241m0\u001b[39m, \u001b[43mresult_list\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msuperclass\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[1;32m 5\u001b[0m \u001b[38;5;66;03m# Combine into a new DataFrame (columns side-by-side)\u001b[39;00m\n\u001b[1;32m 6\u001b[0m sv_df \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mconcat(sv_columns, axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\n",
276
+ "\u001b[0;31mIndexError\u001b[0m: list index out of range"
277
+ ]
278
+ }
279
+ ],
280
  "source": [
281
  "# Extract \"same_value\" column from each DataFrame\n",
282
  "sv_columns = [df['same_value'] for df in result_list] # 'same' needs to be changed\n",
 
290
  },
291
  {
292
  "cell_type": "code",
293
+ "execution_count": 62,
294
  "id": "bf7546cb-79ce-49ad-8cee-54d02239220c",
295
  "metadata": {},
296
+ "outputs": [
297
+ {
298
+ "name": "stdout",
299
+ "output_type": "stream",
300
+ "text": [
301
+ " superclass accuracy\n",
302
+ "0 activity_type 0.8\n",
303
+ "1 activities 0.0\n",
304
+ "2 climate_or_season 0.5\n",
305
+ "3 style_or_comfort 0.3\n",
306
+ "4 dress_code 0.8\n",
307
+ "5 accommodation 0.8\n",
308
+ "6 transportation 0.7\n",
309
+ "7 special_conditions 0.2\n",
310
+ "8 trip_length_days 0.6\n"
311
+ ]
312
+ }
313
+ ],
314
  "source": [
315
  "# Compute accuracy per superclass (row means of same_value matrix excluding the first column)\n",
316
  "row_means = sv_df.iloc[:, 1:].mean(axis=1)\n",
 
350
  "outputs": [],
351
  "source": [
352
  "# save results\n",
 
 
353
  "# Structure to save\n",
354
  "model_result = {\n",
355
  " 'model': model_name,\n",
356
  " 'predictions': result_list,\n",
357
  " 'performance': performance,\n",
358
  " 'perf_summary': column_means,\n",
359
+ " 'perf_superclass': df_row_means,\n",
360
+ " 'elapsed_time': elapsed_time\n",
361
  "}\n",
362
  "\n",
363
  "# File path with folder\n",
 
368
  " pickle.dump(model_result, f)"
369
  ]
370
  },
371
+ {
372
+ "cell_type": "code",
373
+ "execution_count": null,
374
+ "id": "f38d0924-30b6-43cd-9bfc-fe5b0dc80411",
375
+ "metadata": {},
376
+ "outputs": [],
377
+ "source": [
378
+ "print(elapsed_time/60)"
379
+ ]
380
+ },
381
  {
382
  "cell_type": "markdown",
383
  "id": "e1cbb54e-abe6-49b6-957e-0683196f3199",
 
388
  },
389
  {
390
  "cell_type": "code",
391
+ "execution_count": 54,
392
  "id": "62ca82b0-6909-4e6c-9d2c-fed87971e5b6",
393
+ "metadata": {
394
+ "scrolled": true
395
+ },
396
  "outputs": [
397
  {
398
  "name": "stdout",
399
  "output_type": "stream",
400
  "text": [
401
+ "Model: model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli\n",
402
+ "Performance Summary:\n",
403
+ "accuracy 0.522222\n",
404
+ "true_ident 0.841667\n",
405
+ "false_pred 0.572381\n",
406
+ "dtype: float64\n",
407
+ "----------------------------------------\n",
408
  "Model: model_a_facebook-bart-large-mnli\n",
409
  "Performance Summary:\n",
410
  "accuracy 0.454545\n",
 
419
  "false_pred 0.551667\n",
420
  "dtype: float64\n",
421
  "----------------------------------------\n",
422
+ "Model: model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli\n",
423
+ "Performance Summary:\n",
424
+ " superclass accuracy\n",
425
+ "0 activity_type 0.8\n",
426
+ "1 activities 0.0\n",
427
+ "2 climate_or_season 0.5\n",
428
+ "3 style_or_comfort 0.3\n",
429
+ "4 dress_code 0.8\n",
430
+ "5 accommodation 0.8\n",
431
+ "6 transportation 0.7\n",
432
+ "7 special_conditions 0.2\n",
433
+ "8 trip_length_days 0.6\n",
434
+ "----------------------------------------\n",
435
  "Model: model_a_facebook-bart-large-mnli\n",
436
  "Performance Summary:\n",
437
  " superclass accuracy\n",
 
479
  " result = pickle.load(f)\n",
480
  " all_results[model_name] = result\n",
481
  "\n",
482
+ "# Compare performance across models\n",
483
  "for model, data in all_results.items():\n",
484
  " print(f\"Model: {model}\")\n",
485
  " print(f\"Performance Summary:\\n{data['perf_summary']}\")\n",
486
  " print(\"-\" * 40)\n",
487
  "\n",
488
  "\n",
489
+ "# Compare performance across models\n",
490
  "for model, data in all_results.items():\n",
491
  " print(f\"Model: {model}\")\n",
492
  " print(f\"Performance Summary:\\n{data['perf_superclass']}\")\n",
493
  " print(\"-\" * 40)"
494
  ]
495
  },
496
+ {
497
+ "cell_type": "code",
498
+ "execution_count": 69,
499
+ "id": "57fd150d-1cda-4be5-806b-ef380469243a",
500
+ "metadata": {},
501
+ "outputs": [
502
+ {
503
+ "name": "stdout",
504
+ "output_type": "stream",
505
+ "text": [
506
+ "Model: model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli\n",
507
+ "Time in minutes for 10 trips:\n",
508
+ "83.45150986512502\n",
509
+ "----------------------------------------\n",
510
+ "Model: model_a_facebook-bart-large-mnli\n"
511
+ ]
512
+ },
513
+ {
514
+ "ename": "KeyError",
515
+ "evalue": "'elapsed_time'",
516
+ "output_type": "error",
517
+ "traceback": [
518
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
519
+ "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
520
+ "Cell \u001b[0;32mIn[69], line 4\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m model, data \u001b[38;5;129;01min\u001b[39;00m all_results\u001b[38;5;241m.\u001b[39mitems():\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mModel: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m----> 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTime in minutes for 10 trips:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mdata[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124melapsed_time\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;241m/\u001b[39m\u001b[38;5;241m60\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m-\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m40\u001b[39m)\n",
521
+ "\u001b[0;31mKeyError\u001b[0m: 'elapsed_time'"
522
+ ]
523
+ }
524
+ ],
525
+ "source": [
526
+ "# Compare across models\n",
527
+ "for model, data in all_results.items():\n",
528
+ " print(f\"Model: {model}\")\n",
529
+ " print(f\"Time in minutes for 10 trips:\\n{data['elapsed_time']/60}\")\n",
530
+ " print(\"-\" * 40)"
531
+ ]
532
+ },
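The KeyError above is expected for older runs: result pickles saved before 'elapsed_time' was added to model_result simply lack the key. A tolerant comparison loop could use dict.get (sketch, not part of the commit):

    # Hypothetical sketch: tolerate result pickles without 'elapsed_time'.
    for model, data in all_results.items():
        elapsed = data.get('elapsed_time')
        print(f"Model: {model}")
        if elapsed is None:
            print("Time in minutes for 10 trips: not recorded")
        else:
            print(f"Time in minutes for 10 trips: {elapsed / 60:.2f}")
        print("-" * 40)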
533
  {
534
  "cell_type": "markdown",
535
  "id": "17483df4-55c4-41cd-b8a9-61f7a5c7e8a3",
space/packing_label_structure.json CHANGED
@@ -49,7 +49,8 @@
49
  "warm destination / summer",
50
  "variable weather / spring / autumn",
51
  "tropical / humid",
52
- "dry / desert-like"
 
53
  ],
54
  "style_or_comfort": [
55
  "ultralight",
@@ -74,14 +75,13 @@
74
  ],
75
  "special_conditions": [
76
  "off-grid / no electricity",
77
- "self-supported (bring your own food/cooking)",
78
- "child-friendly",
79
  "pet-friendly",
80
- "rainy climate",
81
  "snow and ice",
82
  "high alpine terrain",
83
- "avalanche-prone terrain",
84
- "no special conditions"
85
  ],
86
  "trip_length_days": [
87
  "1 day",
 
49
  "warm destination / summer",
50
  "variable weather / spring / autumn",
51
  "tropical / humid",
52
+ "dry / desert-like",
53
+ "rainy climate"
54
  ],
55
  "style_or_comfort": [
56
  "ultralight",
 
75
  ],
76
  "special_conditions": [
77
  "off-grid / no electricity",
78
+ "self-supported (bring your own cooking gear)",
79
+ "travel with children",
80
  "pet-friendly",
 
81
  "snow and ice",
82
  "high alpine terrain",
83
+ "snow, ice and avalanche-prone terrain",
84
+ "no special conditions to consider"
85
  ],
86
  "trip_length_days": [
87
  "1 day",
space/packing_templates_self_supported_offgrid_expanded.json CHANGED
@@ -535,7 +535,7 @@
535
  "USB-hub (voor meerdere devices)",
536
  "verpakking om elektronica droog te houden"
537
  ],
538
- "self-supported (bring your own food/cooking)": [
539
  "lichtgewicht kooktoestel (gas, benzine of alcohol)",
540
  "brandstof (voldoende voor aantal dagen)",
541
  "pan of keteltje",
@@ -552,7 +552,7 @@
552
  "minstens 2 liter wateropslag per persoon",
553
  "food bag of hangzak voor voedsel (wild-safe)"
554
  ],
555
- "child-friendly": [
556
  "snacks en speelgoed",
557
  "EHBO-set met pleisters",
558
  "extra kleding",
@@ -606,7 +606,7 @@
606
  "extra voeding",
607
  "EHBO-kit"
608
  ],
609
- "avalanche-prone terrain": [
610
  "lawinepieper",
611
  "schep",
612
  "sonde",
 
535
  "USB-hub (voor meerdere devices)",
536
  "verpakking om elektronica droog te houden"
537
  ],
538
+ "self-supported (bring your own cooking gear)": [
539
  "lichtgewicht kooktoestel (gas, benzine of alcohol)",
540
  "brandstof (voldoende voor aantal dagen)",
541
  "pan of keteltje",
 
552
  "minstens 2 liter wateropslag per persoon",
553
  "food bag of hangzak voor voedsel (wild-safe)"
554
  ],
555
+ "travel with children": [
556
  "snacks en speelgoed",
557
  "EHBO-set met pleisters",
558
  "extra kleding",
 
606
  "extra voeding",
607
  "EHBO-kit"
608
  ],
609
+ "snow, ice and avalanche-prone terrain": [
610
  "lawinepieper",
611
  "schep",
612
  "sonde",
space/results/model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli_results.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd1d8f5ed1d8b9a0878aa0e99fa144341c27d48bcf71d77390e28951e4936149
3
+ size 9539
space/space/results/model_b_sileod-deberta-v3-base-tasksource-nli_results.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98f1217c7add9665e814d4ef90a8e1a60e33d53ea81c6c98472bc652aa11ee56
3
+ size 9461
space/space/space/.ipynb_checkpoints/gradio_tryout-checkpoint.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
space/space/space/packing_label_hierarchical_mapping.json CHANGED
@@ -247,11 +247,11 @@
247
  "category": "accommodation",
248
  "superclass": "type"
249
  },
250
- "transportation: vehicle": {
251
  "category": "transportation",
252
  "superclass": "mode"
253
  },
254
- "transportation: no vehicle": {
255
  "category": "transportation",
256
  "superclass": "mode"
257
  },
 
247
  "category": "accommodation",
248
  "superclass": "type"
249
  },
250
+ "transportation: own vehicle": {
251
  "category": "transportation",
252
  "superclass": "mode"
253
  },
254
+ "transportation: no own vehicle": {
255
  "category": "transportation",
256
  "superclass": "mode"
257
  },
space/space/space/packing_label_structure.json CHANGED
@@ -69,8 +69,8 @@
69
  "sleeping in a car"
70
  ],
71
  "transportation": [
72
- "vehicle",
73
- "no vehicle"
74
  ],
75
  "special_conditions": [
76
  "off-grid / no electricity",
 
69
  "sleeping in a car"
70
  ],
71
  "transportation": [
72
+ "own vehicle",
73
+ "no own vehicle"
74
  ],
75
  "special_conditions": [
76
  "off-grid / no electricity",
space/space/space/packing_templates_self_supported_offgrid_expanded.json CHANGED
@@ -507,14 +507,14 @@
507
  "snacks voor de nacht",
508
  "thermische deken (voor koude nachten)"
509
  ],
510
- "vehicle": [
511
  "rijbewijs",
512
  "autopapieren",
513
  "EHBO-set",
514
  "navigatie of smartphone",
515
  "telefoonhouder"
516
  ],
517
- "no vehicle": [
518
  "rugzak",
519
  "waterfles",
520
  "lichte schoenen",
 
507
  "snacks voor de nacht",
508
  "thermische deken (voor koude nachten)"
509
  ],
510
+ "own vehicle": [
511
  "rijbewijs",
512
  "autopapieren",
513
  "EHBO-set",
514
  "navigatie of smartphone",
515
  "telefoonhouder"
516
  ],
517
+ "no own vehicle": [
518
  "rugzak",
519
  "waterfles",
520
  "lichte schoenen",
space/space/space/results/model_a_facebook-bart-large-mnli_results.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f5c3b28d1fb60f40fa22ebbe9a32e10f6a81b61beaf376c620eabc6912a72e7
3
+ size 9445
space/space/space/space/.ipynb_checkpoints/gradio_tryout-checkpoint.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
space/space/space/space/gradio_tryout.ipynb CHANGED
@@ -24,7 +24,7 @@
24
  },
25
  {
26
  "cell_type": "code",
27
- "execution_count": 16,
28
  "id": "f8c28d2d-8458-49fd-8ebf-5e729d6e861f",
29
  "metadata": {},
30
  "outputs": [
@@ -39,9 +39,9 @@
39
  "name": "stdout",
40
  "output_type": "stream",
41
  "text": [
42
- "First trip: I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands. \n",
43
  "\n",
44
- "Trip type: ['beach vacation', ['swimming', 'going to the beach', 'relaxing', 'hiking'], 'warm destination / summer', 'lightweight (but comfortable)', 'casual', 'indoor', 'no vehicle', 'no special conditions', '7 days']\n"
45
  ]
46
  }
47
  ],
@@ -51,9 +51,16 @@
51
  "from transformers import pipeline\n",
52
  "import json\n",
53
  "import pandas as pd\n",
 
 
 
 
54
  "\n",
55
  "# Load the model and create a pipeline for zero-shot classification (1min loading + classifying with 89 labels)\n",
56
- "classifier = pipeline(\"zero-shot-classification\", model=\"facebook/bart-large-mnli\")\n",
 
 
 
57
  "\n",
58
  "# get candidate labels\n",
59
  "with open(\"packing_label_structure.json\", \"r\") as file:\n",
@@ -68,9 +75,9 @@
68
  "trip_types = [trip['trip_types'] for trip in packing_data]\n",
69
  "\n",
70
  "# Access the first trip description\n",
71
- "first_trip = trip_descriptions[0]\n",
72
  "# Get the packing list for the secondfirst trip\n",
73
- "first_trip_type = trip_types[0]\n",
74
  "\n",
75
  "print(f\"First trip: {first_trip} \\n\")\n",
76
  "print(f\"Trip type: {first_trip_type}\")"
@@ -78,196 +85,385 @@
78
  },
79
  {
80
  "cell_type": "code",
81
- "execution_count": 37,
82
- "id": "fed1f8bc-5baf-46e7-8763-9d56fb9c536b",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  "metadata": {
84
  "scrolled": true
85
  },
86
  "outputs": [
 
 
 
 
 
 
 
87
  {
88
  "name": "stdout",
89
  "output_type": "stream",
90
  "text": [
91
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['beach vacation', 'micro-adventure / weekend trip', 'nature escape', 'digital nomad trip', 'cultural exploration', 'yoga / wellness retreat', 'festival trip', 'long-distance hike / thru-hike', 'hut trek (summer)', 'city trip', 'road trip (car/camper)', 'ski tour / skitour', 'camping trip (campground)', 'snowboard / splitboard trip', 'camping trip (wild camping)', 'hut trek (winter)'], 'scores': [0.37631064653396606, 0.35016775131225586, 0.13397355377674103, 0.031636204570531845, 0.031270742416381836, 0.012846449390053749, 0.012699575163424015, 0.009526746347546577, 0.008148356340825558, 0.007793044205754995, 0.006512156222015619, 0.005669699050486088, 0.0044484627433121204, 0.004113250877708197, 0.002713854657486081, 0.002169555053114891]}\n",
92
  "beach vacation\n",
93
  "0\n",
94
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['going to the beach', 'relaxing', 'hiking', 'swimming', 'sightseeing', 'running', 'hut-to-hut hiking', 'biking', 'photography', 'surfing', 'stand-up paddleboarding (SUP)', 'snorkeling', 'yoga', 'kayaking / canoeing', 'rock climbing', 'fishing', 'paragliding', 'rafting', 'horseback riding', 'snowshoe hiking', 'cross-country skiing', 'ice climbing', 'skiing', 'scuba diving', 'ski touring'], 'scores': [0.9914858341217041, 0.9771362543106079, 0.9426282048225403, 0.21901991963386536, 0.17586199939250946, 0.09854521602392197, 0.08370419591665268, 0.03679152950644493, 0.03668990358710289, 0.03099300153553486, 0.025300050154328346, 0.021451234817504883, 0.011070131324231625, 0.0075112744234502316, 0.006306737195700407, 0.0034973458386957645, 0.002655829070135951, 0.00197031581774354, 0.0015599008183926344, 0.001527810120023787, 0.0015017405385151505, 0.0014336870517581701, 0.0011686616344377398, 0.000789369223639369, 0.0004912536824122071]}\n",
95
- "['going to the beach', 'relaxing', 'hiking']\n",
96
  "1\n",
97
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['warm destination / summer', 'tropical / humid', 'variable weather / spring / autumn', 'dry / desert-like', 'cold destination / winter'], 'scores': [0.6468702554702759, 0.19999535381793976, 0.09394325315952301, 0.05279730260372162, 0.0063938056118786335]}\n",
98
- "warm destination / summer\n",
99
  "2\n",
100
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['minimalist', 'ultralight', 'lightweight (but comfortable)', 'luxury (including evening wear)'], 'scores': [0.4286234974861145, 0.2564568817615509, 0.2147122174501419, 0.10020739585161209]}\n",
101
  "minimalist\n",
102
  "3\n",
103
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['casual', 'conservative', 'formal (business trip)'], 'scores': [0.6567223072052002, 0.3034382164478302, 0.039839524775743484]}\n",
104
  "casual\n",
105
  "4\n",
106
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['huts with half board', 'indoor', 'sleeping in a car', 'sleeping in a tent'], 'scores': [0.5007699728012085, 0.34074831008911133, 0.10416240990161896, 0.05431929975748062]}\n",
107
- "huts with half board\n",
108
  "5\n",
109
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['vehicle', 'no vehicle'], 'scores': [0.7521055936813354, 0.24789436161518097]}\n",
110
- "vehicle\n",
111
  "6\n",
112
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['off-grid / no electricity', 'self-supported (bring your own food/cooking)', 'no special conditions', 'pet-friendly', 'rainy climate', 'child-friendly', 'snow and ice', 'high alpine terrain', 'avalanche-prone terrain'], 'scores': [0.46220096945762634, 0.12957870960235596, 0.10651793330907822, 0.09777138382196426, 0.06722460687160492, 0.0632496327161789, 0.04952802509069443, 0.015049820765852928, 0.008878983557224274]}\n",
113
  "off-grid / no electricity\n",
114
  "7\n",
115
- "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['7+ days', '2 days', '1 day', '6 days', '3 days', '4 days', '7 days', '5 days'], 'scores': [0.21139054000377655, 0.18512114882469177, 0.14520084857940674, 0.0976138487458229, 0.094282366335392, 0.09376301616430283, 0.09161651134490967, 0.08101171255111694]}\n",
116
  "7+ days\n",
117
- "8\n"
118
  ]
119
  }
120
  ],
121
  "source": [
122
- "# Create an empty DataFrame with specified columns\n",
123
- "df = pd.DataFrame(columns=['superclass', 'pred_class'])\n",
124
- "cutoff = 0.5 # used to choose which activities are relevant\n",
125
  "\n",
126
- "# fill DataFrame\n",
127
- "for i, key in enumerate(keys_list):\n",
128
- " # Run the classification (ca 30 seconds classifying)\n",
129
- " if key == 'activities':\n",
130
- " result = classifier(first_trip, candidate_labels[key], multi_label=True)\n",
131
- " indices = [i for i, score in enumerate(result['scores']) if score > cutoff]\n",
132
- " classes = [result['labels'][i] for i in indices]\n",
133
- " else:\n",
134
- " result = classifier(first_trip, candidate_labels[key])\n",
135
- " classes = result[\"labels\"][0]\n",
136
- " print(result)\n",
137
- " print(classes)\n",
138
- " print(i)\n",
139
- " df.loc[i] = [key, classes]\n",
140
  "\n",
141
- "df['true_class'] = first_trip_type"
142
  ]
143
  },
144
  {
145
  "cell_type": "code",
146
- "execution_count": 40,
147
- "id": "b3b51280-76a1-4229-a9de-070b925d3463",
148
  "metadata": {
149
  "scrolled": true
150
  },
151
- "outputs": [
152
- {
153
- "name": "stdout",
154
- "output_type": "stream",
155
- "text": [
156
- " Superclass pred_class true_class\n",
157
- "0 activity_type beach vacation beach vacation\n",
158
- "1 activities [going to the beach, relaxing, hiking] [swimming, going to the beach, relaxing, hiking]\n",
159
- "2 climate_or_season warm destination / summer warm destination / summer\n",
160
- "3 style_or_comfort minimalist lightweight (but comfortable)\n",
161
- "4 dress_code casual casual\n",
162
- "5 accommodation huts with half board indoor\n",
163
- "6 transportation vehicle no vehicle\n",
164
- "7 special_conditions off-grid / no electricity no special conditions\n",
165
- "8 trip_length_days 7+ days 7 days\n"
166
- ]
167
- }
168
- ],
169
  "source": [
170
- "pd.set_option('display.width', 1000) \n",
171
- "pd.set_option('display.max_columns', None)\n",
172
- "print(df)"
173
  ]
174
  },
175
  {
176
  "cell_type": "code",
177
- "execution_count": 42,
178
- "id": "2ec09e8f-75f5-45b1-b4c0-4fafd685d36b",
179
  "metadata": {},
180
- "outputs": [
181
- {
182
- "name": "stdout",
183
- "output_type": "stream",
184
- "text": [
185
- " Superclass pred_class true_class same\n",
186
- "0 activity_type beach vacation beach vacation True\n",
187
- "1 activities [going to the beach, relaxing, hiking] [swimming, going to the beach, relaxing, hiking] False\n",
188
- "2 climate_or_season warm destination / summer warm destination / summer True\n",
189
- "3 style_or_comfort minimalist lightweight (but comfortable) False\n",
190
- "4 dress_code casual casual True\n",
191
- "5 accommodation huts with half board indoor False\n",
192
- "6 transportation vehicle no vehicle False\n",
193
- "7 special_conditions off-grid / no electricity no special conditions False\n",
194
- "8 trip_length_days 7+ days 7 days False\n"
195
- ]
196
- }
197
- ],
198
  "source": [
199
- "df['same'] = df['pred_class'] == df['true_class']\n",
200
- "print(df)"
201
  ]
202
  },
203
  {
204
- "cell_type": "code",
205
- "execution_count": 62,
206
- "id": "82ae19c8-8bb7-4f7f-841b-1cb6501a17a7",
207
  "metadata": {},
208
- "outputs": [
209
- {
210
- "name": "stdout",
211
- "output_type": "stream",
212
- "text": [
213
- "Accuracy (excluding activities): 0.3333333333333333\n"
214
- ]
215
- }
216
- ],
217
  "source": [
218
- "# accuracy excluding activities\n",
219
- "correct = sum(df.loc[df.index != 1, 'same'])\n",
220
- "total = len(df['same'])\n",
221
- "accuracy = correct/total\n",
222
- "print(\"Accuracy (excluding activities):\", accuracy)"
223
  ]
224
  },
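Note on the cell above: it labels its result "accuracy excluding activities", but only the numerator skips the activities row (index 1); the denominator total = len(df['same']) still counts all nine rows, which is why three matching categories give 0.33 instead of 3/8. A minimal sketch of a variant that leaves the multi-label activities row out of both numerator and denominator, assuming the df built in the cells above (variable names here are illustrative):

# Restrict the score to the eight single-label categories.
single_label = df[df['superclass'] != 'activities']
accuracy_single = single_label['same'].sum() / len(single_label)
print("Accuracy over single-label categories:", accuracy_single)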
225
  {
226
  "cell_type": "code",
227
- "execution_count": 64,
228
- "id": "16c0a3ae-34ac-49a4-b59f-411a6f0ce947",
229
  "metadata": {},
230
  "outputs": [
231
  {
232
  "name": "stdout",
233
  "output_type": "stream",
234
  "text": [
235
- "Percentage of true classes that were identified: 0.75\n",
236
- "Percentage of predicted classes that were wrong: 0.0\n"
237
  ]
238
  }
239
  ],
240
  "source": [
241
- "pred_class = df.loc[df.index == 1, 'pred_class'].iloc[0]\n",
242
- "true_class = df.loc[df.index == 1, 'true_class'].iloc[0]\n",
243
- "correct = [label for label in pred_class if label in true_class]\n",
244
  "\n",
245
- "num_correct = len(correct)\n",
246
- "correct_perc = num_correct/len(true_class)\n",
247
  "\n",
248
- "num_pred = len(pred_class)\n",
249
- "wrong_perc = (num_pred - num_correct)/num_pred\n",
250
  "\n",
251
- "print(\"Percentage of true classes that were identified:\", correct_perc)\n",
252
- "print(\"Percentage of predicted classes that were wrong:\", wrong_perc)"
253
- ]
254
- },
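In multi-label terms, the two numbers printed above are the recall over the true activity labels (3 of 4 identified, hence 0.75) and the share of predicted labels missing from the true set (0 of 3, hence 0.0), i.e. one minus precision. A small sketch of the same computation over label sets, assuming the pred_class and true_class lists from the cell above (the helper name is illustrative, not from the notebook):

def label_set_scores(pred_labels, true_labels):
    # Overlap-based recall and precision for the multi-label 'activities' row.
    pred, true = set(pred_labels), set(true_labels)
    hits = pred & true
    recall = len(hits) / len(true) if true else 0.0
    precision = len(hits) / len(pred) if pred else 0.0
    return recall, precision

recall, precision = label_set_scores(pred_class, true_class)
print("Share of true labels identified:", recall)
print("Share of predicted labels that were wrong:", 1 - precision)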
255
- {
256
- "cell_type": "markdown",
257
- "id": "62c5c18c-58f4-465c-a188-c57cfa7ffa90",
258
- "metadata": {},
259
- "source": [
260
- "Now do the same for all trips"
261
  ]
262
  },
263
- {
264
- "cell_type": "code",
265
- "execution_count": null,
266
- "id": "e4192b42-f1bc-4fcb-a238-dbdb3df7d699",
267
- "metadata": {},
268
- "outputs": [],
269
- "source": []
270
- },
271
  {
272
  "cell_type": "markdown",
273
  "id": "17483df4-55c4-41cd-b8a9-61f7a5c7e8a3",
 
24
  },
25
  {
26
  "cell_type": "code",
27
+ "execution_count": 40,
28
  "id": "f8c28d2d-8458-49fd-8ebf-5e729d6e861f",
29
  "metadata": {},
30
  "outputs": [
 
39
  "name": "stdout",
40
  "output_type": "stream",
41
  "text": [
42
+ "First trip: We are a couple in our thirties traveling to Vienna for a three-day city trip. We’ll be staying at a friend’s house and plan to explore the city by sightseeing, strolling through the streets, visiting markets, and trying out great restaurants and cafés. We also hope to attend a classical music concert. Our journey to Vienna will be by train. \n",
43
  "\n",
44
+ "Trip type: ['city trip', ['sightseeing'], 'variable weather / spring / autumn', 'luxury (including evening wear)', 'casual', 'indoor', 'no own vehicle', 'no special condition', '3 days']\n"
45
  ]
46
  }
47
  ],
 
51
  "from transformers import pipeline\n",
52
  "import json\n",
53
  "import pandas as pd\n",
54
+ "import matplotlib.pyplot as plt\n",
55
+ "import pickle\n",
56
+ "import os\n",
57
+ "import time\n",
58
  "\n",
59
  "# Load the model and create a pipeline for zero-shot classification (1min loading + classifying with 89 labels)\n",
60
+ "classifier = pipeline(\"zero-shot-classification\", model=\"MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli\")\n",
61
+ "# tried:\n",
62
+ "# facebook/bart-large-mnli\n",
63
+ "# sileod/deberta-v3-base-tasksource-nli\n",
64
  "\n",
65
  "# get candidate labels\n",
66
  "with open(\"packing_label_structure.json\", \"r\") as file:\n",
 
75
  "trip_types = [trip['trip_types'] for trip in packing_data]\n",
76
  "\n",
77
  "# Access the first trip description\n",
78
+ "first_trip = trip_descriptions[1]\n",
79
  "# Get the packing list for the secondfirst trip\n",
80
+ "first_trip_type = trip_types[1]\n",
81
  "\n",
82
  "print(f\"First trip: {first_trip} \\n\")\n",
83
  "print(f\"Trip type: {first_trip_type}\")"
 
85
  },
86
  {
87
  "cell_type": "code",
88
+ "execution_count": 41,
89
+ "id": "3a762755-872d-43a6-b666-874d6133488c",
90
+ "metadata": {},
91
+ "outputs": [],
92
+ "source": [
93
+ "# function that returns pandas data frame with predictions\n",
94
+ "\n",
95
+ "cut_off = 0.5 # used to choose which activities are relevant\n",
96
+ "\n",
97
+ "def pred_trip(trip_descr, trip_type, cut_off):\n",
98
+ " # Create an empty DataFrame with specified columns\n",
99
+ " df = pd.DataFrame(columns=['superclass', 'pred_class'])\n",
100
+ " for i, key in enumerate(keys_list):\n",
101
+ " if key == 'activities':\n",
102
+ " result = classifier(trip_descr, candidate_labels[key], multi_label=True)\n",
103
+ " indices = [i for i, score in enumerate(result['scores']) if score > cut_off]\n",
104
+ " classes = [result['labels'][i] for i in indices]\n",
105
+ " else:\n",
106
+ " result = classifier(trip_descr, candidate_labels[key])\n",
107
+ " classes = result[\"labels\"][0]\n",
108
+ " print(result)\n",
109
+ " print(classes)\n",
110
+ " print(i)\n",
111
+ " df.loc[i] = [key, classes]\n",
112
+ " df['true_class'] = trip_type\n",
113
+ " return df"
114
+ ]
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "execution_count": 42,
119
+ "id": "3b4f3193-3bdd-453c-8664-df84f955600c",
120
+ "metadata": {},
121
+ "outputs": [],
122
+ "source": [
123
+ "# function for accuracy, perc true classes identified and perc wrong pred classes\n",
124
+ "\n",
125
+ "def perf_measure(df):\n",
126
+ " df['same_value'] = df['pred_class'] == df['true_class']\n",
127
+ " correct = sum(df.loc[df.index != 1, 'same_value'])\n",
128
+ " total = len(df['same_value'])\n",
129
+ " accuracy = correct/total\n",
130
+ " pred_class = df.loc[df.index == 1, 'pred_class'].iloc[0]\n",
131
+ " true_class = df.loc[df.index == 1, 'true_class'].iloc[0]\n",
132
+ " correct = [label for label in pred_class if label in true_class]\n",
133
+ " num_correct = len(correct)\n",
134
+ " correct_perc = num_correct/len(true_class)\n",
135
+ " num_pred = len(pred_class)\n",
136
+ " wrong_perc = (num_pred - num_correct)/num_pred\n",
137
+ " df_perf = pd.DataFrame({\n",
138
+ " 'accuracy': [accuracy],\n",
139
+ " 'true_ident': [correct_perc],\n",
140
+ " 'false_pred': [wrong_perc]\n",
141
+ " })\n",
142
+ " return(df_perf)"
143
+ ]
144
+ },
145
+ {
146
+ "cell_type": "markdown",
147
+ "id": "62c5c18c-58f4-465c-a188-c57cfa7ffa90",
148
+ "metadata": {},
149
+ "source": [
150
+ "**Now do the same for all trips**"
151
+ ]
152
+ },
153
+ {
154
+ "cell_type": "code",
155
+ "execution_count": 43,
156
+ "id": "4dd01755-be8d-4904-8494-ac28aba2fee7",
157
  "metadata": {
158
  "scrolled": true
159
  },
160
  "outputs": [
161
+ {
162
+ "name": "stderr",
163
+ "output_type": "stream",
164
+ "text": [
165
+ "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
166
+ ]
167
+ },
168
  {
169
  "name": "stdout",
170
  "output_type": "stream",
171
  "text": [
172
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['beach vacation', 'micro-adventure / weekend trip', 'cultural exploration', 'nature escape', 'digital nomad trip', 'camping trip (campground)', 'camping trip (wild camping)', 'long-distance hike / thru-hike', 'ski tour / skitour', 'hut trek (summer)', 'city trip', 'hut trek (winter)', 'road trip (car/camper)', 'festival trip', 'yoga / wellness retreat', 'snowboard / splitboard trip'], 'scores': [0.37198853492736816, 0.31496119499206543, 0.10890532284975052, 0.09102731198072433, 0.0735681876540184, 0.012933704070746899, 0.009422042407095432, 0.0051276967860758305, 0.004056071396917105, 0.0017408831045031548, 0.001503779087215662, 0.0014244643971323967, 0.0013752576196566224, 0.0009292717440985143, 0.0006881792796775699, 0.0003480584127828479]}\n",
173
  "beach vacation\n",
174
  "0\n",
175
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['relaxing', 'hiking', 'going to the beach', 'photography', 'sightseeing', 'hut-to-hut hiking', 'snorkeling', 'snowshoe hiking', 'yoga', 'stand-up paddleboarding (SUP)', 'kayaking / canoeing', 'horseback riding', 'swimming', 'paragliding', 'rafting', 'biking', 'rock climbing', 'surfing', 'running', 'ice climbing', 'cross-country skiing', 'fishing', 'ski touring', 'skiing', 'scuba diving'], 'scores': [0.9943736791610718, 0.9631249308586121, 0.9454535841941833, 0.7538902759552002, 0.4525446593761444, 0.1696157604455948, 0.05957728251814842, 0.04234873503446579, 0.01991761103272438, 0.016971556469798088, 0.006959819234907627, 0.00411367928609252, 0.0030609173700213432, 0.00186573073733598, 0.0017515394138172269, 0.00142807571683079, 0.0005748369731009007, 0.00037779140984639525, 0.0003097739245276898, 0.00030914091621525586, 0.0002725012309383601, 0.00027050732751376927, 0.00024376016517635435, 0.00017392759036738425, 0.00014787293912377208]}\n",
176
+ "['relaxing', 'hiking', 'going to the beach', 'photography']\n",
177
  "1\n",
178
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['tropical / humid', 'warm destination / summer', 'variable weather / spring / autumn', 'cold destination / winter', 'dry / desert-like'], 'scores': [0.4895477890968323, 0.25917261838912964, 0.24829530715942383, 0.0017174285603687167, 0.0012668712297454476]}\n",
179
+ "tropical / humid\n",
180
  "2\n",
181
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['minimalist', 'luxury (including evening wear)', 'lightweight (but comfortable)', 'ultralight'], 'scores': [0.7574900984764099, 0.09964746236801147, 0.07804173231124878, 0.06482075154781342]}\n",
182
  "minimalist\n",
183
  "3\n",
184
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['casual', 'conservative', 'formal (business trip)'], 'scores': [0.8163393139839172, 0.11898067593574524, 0.06467998772859573]}\n",
185
  "casual\n",
186
  "4\n",
187
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['indoor', 'huts with half board', 'sleeping in a car', 'sleeping in a tent'], 'scores': [0.6389047503471375, 0.18624886870384216, 0.13902997970581055, 0.03581654652953148]}\n",
188
+ "indoor\n",
189
  "5\n",
190
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['no own vehicle', 'own vehicle'], 'scores': [0.9990958571434021, 0.0009041387238539755]}\n",
191
+ "no own vehicle\n",
192
  "6\n",
193
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['off-grid / no electricity', 'self-supported (bring your own food/cooking)', 'child-friendly', 'no special conditions', 'pet-friendly', 'rainy climate', 'avalanche-prone terrain', 'high alpine terrain', 'snow and ice'], 'scores': [0.7414510250091553, 0.07683143764734268, 0.055722303688526154, 0.054133761674165726, 0.04852374270558357, 0.006977608893066645, 0.005693929269909859, 0.005599685944616795, 0.005066512618213892]}\n",
194
  "off-grid / no electricity\n",
195
  "7\n",
196
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['7+ days', '2 days', '7 days', '3 days', '4 days', '6 days', '5 days', '1 day'], 'scores': [0.29225289821624756, 0.20232954621315002, 0.1837582290172577, 0.13940994441509247, 0.06562349200248718, 0.04916509613394737, 0.040249694138765335, 0.0272111464291811]}\n",
197
  "7+ days\n",
198
+ "8\n",
199
+ " superclass pred_class \\\n",
200
+ "0 activity_type beach vacation \n",
201
+ "1 activities [relaxing, hiking, going to the beach, photogr... \n",
202
+ "2 climate_or_season tropical / humid \n",
203
+ "3 style_or_comfort minimalist \n",
204
+ "4 dress_code casual \n",
205
+ "5 accommodation indoor \n",
206
+ "6 transportation no own vehicle \n",
207
+ "7 special_conditions off-grid / no electricity \n",
208
+ "8 trip_length_days 7+ days \n",
209
+ "\n",
210
+ " true_class \n",
211
+ "0 beach vacation \n",
212
+ "1 [swimming, going to the beach, relaxing, hiking] \n",
213
+ "2 warm destination / summer \n",
214
+ "3 lightweight (but comfortable) \n",
215
+ "4 casual \n",
216
+ "5 indoor \n",
217
+ "6 no own vehicle \n",
218
+ "7 no special conditions \n",
219
+ "8 7+ days \n",
220
+ " accuracy true_ident false_pred\n",
221
+ "0 0.555556 0.75 0.25\n",
222
+ "{'sequence': 'We are a couple in our thirties traveling to Vienna for a three-day city trip. We’ll be staying at a friend’s house and plan to explore the city by sightseeing, strolling through the streets, visiting markets, and trying out great restaurants and cafés. We also hope to attend a classical music concert. Our journey to Vienna will be by train.', 'labels': ['city trip', 'cultural exploration', 'micro-adventure / weekend trip', 'ski tour / skitour', 'festival trip', 'digital nomad trip', 'hut trek (winter)', 'camping trip (campground)', 'long-distance hike / thru-hike', 'hut trek (summer)', 'nature escape', 'camping trip (wild camping)', 'yoga / wellness retreat', 'road trip (car/camper)', 'beach vacation', 'snowboard / splitboard trip'], 'scores': [0.517789363861084, 0.297355592250824, 0.1621870994567871, 0.006185388192534447, 0.005294559057801962, 0.002764208009466529, 0.001503965351730585, 0.0014866390265524387, 0.0012240204960107803, 0.0012071850942447782, 0.000757778063416481, 0.0006650012801401317, 0.0005547589971683919, 0.00043604226084426045, 0.00031738984398543835, 0.0002710542466957122]}\n",
223
+ "city trip\n",
224
+ "0\n"
225
+ ]
226
+ },
227
+ {
228
+ "ename": "KeyboardInterrupt",
229
+ "evalue": "",
230
+ "output_type": "error",
231
+ "traceback": [
232
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
233
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
234
+ "Cell \u001b[0;32mIn[43], line 7\u001b[0m\n\u001b[1;32m 5\u001b[0m current_trip \u001b[38;5;241m=\u001b[39m trip_descriptions[i]\n\u001b[1;32m 6\u001b[0m current_type \u001b[38;5;241m=\u001b[39m trip_types[i]\n\u001b[0;32m----> 7\u001b[0m df \u001b[38;5;241m=\u001b[39m \u001b[43mpred_trip\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcurrent_trip\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcurrent_type\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcut_off\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0.5\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28mprint\u001b[39m(df)\n\u001b[1;32m 10\u001b[0m \u001b[38;5;66;03m# accuracy, perc true classes identified and perc wrong pred classes\u001b[39;00m\n",
235
+ "Cell \u001b[0;32mIn[41], line 10\u001b[0m, in \u001b[0;36mpred_trip\u001b[0;34m(trip_descr, trip_type, cut_off)\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, key \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(keys_list):\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m key \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mactivities\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[0;32m---> 10\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mclassifier\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrip_descr\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcandidate_labels\u001b[49m\u001b[43m[\u001b[49m\u001b[43mkey\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmulti_label\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m 11\u001b[0m indices \u001b[38;5;241m=\u001b[39m [i \u001b[38;5;28;01mfor\u001b[39;00m i, score \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(result[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mscores\u001b[39m\u001b[38;5;124m'\u001b[39m]) \u001b[38;5;28;01mif\u001b[39;00m score \u001b[38;5;241m>\u001b[39m cut_off]\n\u001b[1;32m 12\u001b[0m classes \u001b[38;5;241m=\u001b[39m [result[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mlabels\u001b[39m\u001b[38;5;124m'\u001b[39m][i] \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m indices]\n",
236
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/zero_shot_classification.py:206\u001b[0m, in \u001b[0;36mZeroShotClassificationPipeline.__call__\u001b[0;34m(self, sequences, *args, **kwargs)\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 204\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUnable to understand extra arguments \u001b[39m\u001b[38;5;132;01m{\u001b[39;00margs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 206\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__call__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43msequences\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
237
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/base.py:1294\u001b[0m, in \u001b[0;36mPipeline.__call__\u001b[0;34m(self, inputs, num_workers, batch_size, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1292\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39miterate(inputs, preprocess_params, forward_params, postprocess_params)\n\u001b[1;32m 1293\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mframework \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m, ChunkPipeline):\n\u001b[0;32m-> 1294\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1295\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43miter\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1296\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_iterator\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1297\u001b[0m \u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_workers\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpreprocess_params\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mforward_params\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpostprocess_params\u001b[49m\n\u001b[1;32m 1298\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1299\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1300\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1301\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1302\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrun_single(inputs, preprocess_params, forward_params, postprocess_params)\n",
238
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/pt_utils.py:124\u001b[0m, in \u001b[0;36mPipelineIterator.__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 121\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloader_batch_item()\n\u001b[1;32m 123\u001b[0m \u001b[38;5;66;03m# We're out of items within a batch\u001b[39;00m\n\u001b[0;32m--> 124\u001b[0m item \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43miterator\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 125\u001b[0m processed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minfer(item, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mparams)\n\u001b[1;32m 126\u001b[0m \u001b[38;5;66;03m# We now have a batch of \"inferred things\".\u001b[39;00m\n",
239
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/pt_utils.py:269\u001b[0m, in \u001b[0;36mPipelinePackIterator.__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 266\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m accumulator\n\u001b[1;32m 268\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_last:\n\u001b[0;32m--> 269\u001b[0m processed \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minfer\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43miterator\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparams\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 270\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloader_batch_size \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 271\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(processed, torch\u001b[38;5;241m.\u001b[39mTensor):\n",
240
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/base.py:1209\u001b[0m, in \u001b[0;36mPipeline.forward\u001b[0;34m(self, model_inputs, **forward_params)\u001b[0m\n\u001b[1;32m 1207\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m inference_context():\n\u001b[1;32m 1208\u001b[0m model_inputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_ensure_tensor_on_device(model_inputs, device\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdevice)\n\u001b[0;32m-> 1209\u001b[0m model_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mforward_params\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1210\u001b[0m model_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_ensure_tensor_on_device(model_outputs, device\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mdevice(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcpu\u001b[39m\u001b[38;5;124m\"\u001b[39m))\n\u001b[1;32m 1211\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n",
241
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/pipelines/zero_shot_classification.py:229\u001b[0m, in \u001b[0;36mZeroShotClassificationPipeline._forward\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 227\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muse_cache\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m inspect\u001b[38;5;241m.\u001b[39msignature(model_forward)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mkeys():\n\u001b[1;32m 228\u001b[0m model_inputs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muse_cache\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[0;32m--> 229\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 231\u001b[0m model_outputs \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 232\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcandidate_label\u001b[39m\u001b[38;5;124m\"\u001b[39m: candidate_label,\n\u001b[1;32m 233\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msequence\u001b[39m\u001b[38;5;124m\"\u001b[39m: sequence,\n\u001b[1;32m 234\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mis_last\u001b[39m\u001b[38;5;124m\"\u001b[39m: inputs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mis_last\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[1;32m 235\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39moutputs,\n\u001b[1;32m 236\u001b[0m }\n\u001b[1;32m 237\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m model_outputs\n",
242
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
243
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
244
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:1297\u001b[0m, in \u001b[0;36mDebertaV2ForSequenceClassification.forward\u001b[0;34m(self, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, labels, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 1289\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1290\u001b[0m \u001b[38;5;124;03mlabels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\u001b[39;00m\n\u001b[1;32m 1291\u001b[0m \u001b[38;5;124;03m Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\u001b[39;00m\n\u001b[1;32m 1292\u001b[0m \u001b[38;5;124;03m config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\u001b[39;00m\n\u001b[1;32m 1293\u001b[0m \u001b[38;5;124;03m `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\u001b[39;00m\n\u001b[1;32m 1294\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 1295\u001b[0m return_dict \u001b[38;5;241m=\u001b[39m return_dict \u001b[38;5;28;01mif\u001b[39;00m return_dict \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39muse_return_dict\n\u001b[0;32m-> 1297\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdeberta\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1298\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1299\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken_type_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken_type_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1300\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1301\u001b[0m \u001b[43m \u001b[49m\u001b[43mposition_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mposition_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1302\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs_embeds\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs_embeds\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1303\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1304\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1305\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1306\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1308\u001b[0m encoder_layer \u001b[38;5;241m=\u001b[39m outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 1309\u001b[0m pooled_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpooler(encoder_layer)\n",
245
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
246
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
247
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:1063\u001b[0m, in \u001b[0;36mDebertaV2Model.forward\u001b[0;34m(self, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 1053\u001b[0m token_type_ids \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mzeros(input_shape, dtype\u001b[38;5;241m=\u001b[39mtorch\u001b[38;5;241m.\u001b[39mlong, device\u001b[38;5;241m=\u001b[39mdevice)\n\u001b[1;32m 1055\u001b[0m embedding_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39membeddings(\n\u001b[1;32m 1056\u001b[0m input_ids\u001b[38;5;241m=\u001b[39minput_ids,\n\u001b[1;32m 1057\u001b[0m token_type_ids\u001b[38;5;241m=\u001b[39mtoken_type_ids,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1060\u001b[0m inputs_embeds\u001b[38;5;241m=\u001b[39minputs_embeds,\n\u001b[1;32m 1061\u001b[0m )\n\u001b[0;32m-> 1063\u001b[0m encoder_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mencoder\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1064\u001b[0m \u001b[43m \u001b[49m\u001b[43membedding_output\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1065\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1066\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 1067\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1068\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1069\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1070\u001b[0m encoded_layers \u001b[38;5;241m=\u001b[39m encoder_outputs[\u001b[38;5;241m1\u001b[39m]\n\u001b[1;32m 1072\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mz_steps \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m1\u001b[39m:\n",
248
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
249
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
250
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:507\u001b[0m, in \u001b[0;36mDebertaV2Encoder.forward\u001b[0;34m(self, hidden_states, attention_mask, output_hidden_states, output_attentions, query_states, relative_pos, return_dict)\u001b[0m\n\u001b[1;32m 497\u001b[0m output_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_gradient_checkpointing_func(\n\u001b[1;32m 498\u001b[0m layer_module\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__call__\u001b[39m,\n\u001b[1;32m 499\u001b[0m next_kv,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 504\u001b[0m output_attentions,\n\u001b[1;32m 505\u001b[0m )\n\u001b[1;32m 506\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 507\u001b[0m output_states \u001b[38;5;241m=\u001b[39m \u001b[43mlayer_module\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 508\u001b[0m \u001b[43m \u001b[49m\u001b[43mnext_kv\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 509\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 510\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 511\u001b[0m \u001b[43m \u001b[49m\u001b[43mrelative_pos\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrelative_pos\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 512\u001b[0m \u001b[43m \u001b[49m\u001b[43mrel_embeddings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrel_embeddings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 513\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 514\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 516\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m output_attentions:\n\u001b[1;32m 517\u001b[0m output_states, att_m \u001b[38;5;241m=\u001b[39m output_states\n",
251
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
252
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
253
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:355\u001b[0m, in \u001b[0;36mDebertaV2Layer.forward\u001b[0;34m(self, hidden_states, attention_mask, query_states, relative_pos, rel_embeddings, output_attentions)\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\n\u001b[1;32m 347\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 348\u001b[0m hidden_states,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 353\u001b[0m output_attentions\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 354\u001b[0m ):\n\u001b[0;32m--> 355\u001b[0m attention_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mattention\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 356\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 357\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 358\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 359\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 360\u001b[0m \u001b[43m \u001b[49m\u001b[43mrelative_pos\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrelative_pos\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 361\u001b[0m \u001b[43m \u001b[49m\u001b[43mrel_embeddings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrel_embeddings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 362\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 363\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m output_attentions:\n\u001b[1;32m 364\u001b[0m attention_output, att_matrix \u001b[38;5;241m=\u001b[39m attention_output\n",
254
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
255
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
256
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:286\u001b[0m, in \u001b[0;36mDebertaV2Attention.forward\u001b[0;34m(self, hidden_states, attention_mask, output_attentions, query_states, relative_pos, rel_embeddings)\u001b[0m\n\u001b[1;32m 277\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\n\u001b[1;32m 278\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 279\u001b[0m hidden_states,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 284\u001b[0m rel_embeddings\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 285\u001b[0m ):\n\u001b[0;32m--> 286\u001b[0m self_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mself\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 287\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 288\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 289\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 290\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 291\u001b[0m \u001b[43m \u001b[49m\u001b[43mrelative_pos\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrelative_pos\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 292\u001b[0m \u001b[43m \u001b[49m\u001b[43mrel_embeddings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrel_embeddings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 293\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 294\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m output_attentions:\n\u001b[1;32m 295\u001b[0m self_output, att_matrix \u001b[38;5;241m=\u001b[39m self_output\n",
257
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
258
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
259
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py:700\u001b[0m, in \u001b[0;36mDisentangledSelfAttention.forward\u001b[0;34m(self, hidden_states, attention_mask, output_attentions, query_states, relative_pos, rel_embeddings)\u001b[0m\n\u001b[1;32m 698\u001b[0m query_states \u001b[38;5;241m=\u001b[39m hidden_states\n\u001b[1;32m 699\u001b[0m query_layer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtranspose_for_scores(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mquery_proj(query_states), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_attention_heads)\n\u001b[0;32m--> 700\u001b[0m key_layer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtranspose_for_scores(\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mkey_proj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m)\u001b[49m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_attention_heads)\n\u001b[1;32m 701\u001b[0m value_layer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtranspose_for_scores(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvalue_proj(hidden_states), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_attention_heads)\n\u001b[1;32m 703\u001b[0m rel_att \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
260
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
261
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
262
+ "File \u001b[0;32m~/opt/anaconda3/envs/huggingface_env/lib/python3.8/site-packages/torch/nn/modules/linear.py:116\u001b[0m, in \u001b[0;36mLinear.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 116\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlinear\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
263
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
264
  ]
265
  }
266
  ],
267
  "source": [
268
+ "result_list = []\n",
269
+ "performance = pd.DataFrame(columns=['accuracy', 'true_ident', 'false_pred'])\n",
270
+ "\n",
271
+ "start_time = time.time()\n",
272
+ "for i in range(len(trip_descriptions)):\n",
273
+ " current_trip = trip_descriptions[i]\n",
274
+ " current_type = trip_types[i]\n",
275
+ " df = pred_trip(current_trip, current_type, cut_off = 0.5)\n",
276
+ " print(df)\n",
277
+ " \n",
278
+ " # accuracy, perc true classes identified and perc wrong pred classes\n",
279
+ " performance = pd.concat([performance, perf_measure(df)])\n",
280
+ " print(performance)\n",
281
+ " \n",
282
+ " result_list.append(df)"
283
+ ]
284
+ },
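Note: the loop above calls `pred_trip` and `perf_measure`, which are defined earlier in the notebook and are not shown in this diff. Purely as an illustration of the three reported metrics (`accuracy`, `true_ident`, `false_pred`), a minimal sketch of such a per-trip measure is given below. It assumes the prediction DataFrame has `superclass`, `pred_class`, `true_class` and a 0/1 `same_value` column; the notebook's actual implementation may differ.

```python
import pandas as pd

def perf_measure_sketch(df: pd.DataFrame) -> pd.DataFrame:
    """Illustrative per-trip metrics; not the notebook's actual perf_measure."""
    # Accuracy over the single-label superclasses (every row except 'activities')
    single = df[df['superclass'] != 'activities']
    accuracy = single['same_value'].mean()

    # Multi-label 'activities' row: compare the predicted and true label lists
    act = df[df['superclass'] == 'activities'].iloc[0]
    pred, true = list(act['pred_class']), list(act['true_class'])
    hits = [label for label in pred if label in true]
    true_ident = len(hits) / len(true) if true else 0.0                 # share of true labels recovered
    false_pred = (len(pred) - len(hits)) / len(pred) if pred else 0.0   # share of predictions that were wrong

    return pd.DataFrame([{'accuracy': accuracy,
                          'true_ident': true_ident,
                          'false_pred': false_pred}])
```

The single-label accuracy and the two list-based measures for 'activities' mirror the single-trip calculations done elsewhere in the notebook.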
285
+ {
286
+ "cell_type": "markdown",
287
+ "id": "b5c08703-7166-4d03-9d6b-ee2c12608134",
288
+ "metadata": {},
289
+ "source": [
290
+ "**Compute average performance measures**"
291
+ ]
292
+ },
293
+ {
294
+ "cell_type": "code",
295
+ "execution_count": null,
296
+ "id": "eb33fd31-94e6-40b5-9c36-a32effe77c01",
297
+ "metadata": {},
298
+ "outputs": [],
299
+ "source": [
300
+ "# Extract \"same_value\" column from each DataFrame\n",
301
+ "sv_columns = [df['same_value'] for df in result_list] # 'same' needs to be changed\n",
302
+ "sv_columns.insert(0, result_list[0]['superclass'])\n",
303
  "\n",
304
+ "# Combine into a new DataFrame (columns side-by-side)\n",
305
+ "sv_df = pd.concat(sv_columns, axis=1)\n",
306
  "\n",
307
+ "print(sv_df)"
308
  ]
309
  },
310
  {
311
  "cell_type": "code",
312
+ "execution_count": null,
313
+ "id": "bf7546cb-79ce-49ad-8cee-54d02239220c",
314
+ "metadata": {},
315
+ "outputs": [],
316
+ "source": [
317
+ "# Compute accuracy per superclass (row means of same_value matrix excluding the first column)\n",
318
+ "row_means = sv_df.iloc[:, 1:].mean(axis=1)\n",
319
+ "\n",
320
+ "df_row_means = pd.DataFrame({\n",
321
+ " 'superclass': sv_df['superclass'],\n",
322
+ " 'accuracy': row_means\n",
323
+ "})\n",
324
+ "\n",
325
+ "print(df_row_means)"
326
+ ]
327
+ },
328
+ {
329
+ "cell_type": "code",
330
+ "execution_count": null,
331
+ "id": "fd232953-59e8-4f28-9ce8-11515a2c310b",
332
  "metadata": {
333
  "scrolled": true
334
  },
335
+ "outputs": [],
336
  "source": [
337
+ "# Compute performance measures per trip (mean for each column of performance table)\n",
338
+ "column_means = performance.mean()\n",
339
+ "print(column_means)\n",
340
+ "\n",
341
+ "# Plot histograms for all numeric columns\n",
342
+ "performance.hist(bins=10, figsize=(10, 6))\n",
343
+ "plt.tight_layout()\n",
344
+ "plt.show()"
345
  ]
346
  },
347
  {
348
  "cell_type": "code",
349
+ "execution_count": null,
350
+ "id": "bd682c84-3eb1-4a8d-9621-b741e98e4537",
351
  "metadata": {},
352
+ "outputs": [],
353
  "source": [
354
+ "# save results\n",
355
+ "# Example data for one model\n",
356
+ "model_name = 'model_MoritzLaurer-DeBERTa-v3-base-mnli-fever-anli'\n",
357
+ "# Structure to save\n",
358
+ "model_result = {\n",
359
+ " 'model': model_name,\n",
360
+ " 'predictions': result_list,\n",
361
+ " 'performance': performance,\n",
362
+ " 'perf_summary': column_means,\n",
363
+ " 'perf_superclass': df_row_means\n",
364
+ "}\n",
365
+ "\n",
366
+ "# File path with folder\n",
367
+ "filename = os.path.join('results', f'{model_name}_results.pkl')\n",
368
+ "\n",
369
+ "# Save the object\n",
370
+ "with open(filename, 'wb') as f:\n",
371
+ " pickle.dump(model_result, f)"
372
  ]
373
  },
374
  {
375
+ "cell_type": "markdown",
376
+ "id": "e1cbb54e-abe6-49b6-957e-0683196f3199",
 
377
  "metadata": {},
378
  "source": [
379
+ "**Load and compare results**"
 
 
 
 
380
  ]
381
  },
382
  {
383
  "cell_type": "code",
384
+ "execution_count": 35,
385
+ "id": "62ca82b0-6909-4e6c-9d2c-fed87971e5b6",
386
  "metadata": {},
387
  "outputs": [
388
  {
389
  "name": "stdout",
390
  "output_type": "stream",
391
  "text": [
392
+ "Model: model_a_facebook-bart-large-mnli\n",
393
+ "Performance Summary:\n",
394
+ "accuracy 0.454545\n",
395
+ "true_ident 0.689394\n",
396
+ "false_pred 0.409091\n",
397
+ "dtype: float64\n",
398
+ "----------------------------------------\n",
399
+ "Model: model_b_sileod-deberta-v3-base-tasksource-nli\n",
400
+ "Performance Summary:\n",
401
+ "accuracy 0.500000\n",
402
+ "true_ident 0.666667\n",
403
+ "false_pred 0.551667\n",
404
+ "dtype: float64\n",
405
+ "----------------------------------------\n",
406
+ "Model: model_a_facebook-bart-large-mnli\n",
407
+ "Performance Summary:\n",
408
+ " superclass accuracy\n",
409
+ "0 activity_type 0.8\n",
410
+ "1 activities 0.0\n",
411
+ "2 climate_or_season 0.6\n",
412
+ "3 style_or_comfort 0.4\n",
413
+ "4 dress_code 0.7\n",
414
+ "5 accommodation 0.3\n",
415
+ "6 transportation 0.8\n",
416
+ "7 special_conditions 0.0\n",
417
+ "8 trip_length_days 0.5\n",
418
+ "----------------------------------------\n",
419
+ "Model: model_b_sileod-deberta-v3-base-tasksource-nli\n",
420
+ "Performance Summary:\n",
421
+ " superclass accuracy\n",
422
+ "0 activity_type 0.7\n",
423
+ "1 activities 0.1\n",
424
+ "2 climate_or_season 0.6\n",
425
+ "3 style_or_comfort 0.4\n",
426
+ "4 dress_code 0.6\n",
427
+ "5 accommodation 0.9\n",
428
+ "6 transportation 0.7\n",
429
+ "7 special_conditions 0.1\n",
430
+ "8 trip_length_days 0.5\n",
431
+ "----------------------------------------\n"
432
  ]
433
  }
434
  ],
435
  "source": [
436
+ "# Folder where your .pkl files are saved\n",
437
+ "results_dir = 'results'\n",
 
438
  "\n",
439
+ "# Dictionary to store all loaded results\n",
440
+ "all_results = {}\n",
441
  "\n",
442
+ "# Loop through all .pkl files in the folder\n",
443
+ "for filename in os.listdir(results_dir):\n",
444
+ " if filename.endswith('.pkl'):\n",
445
+ " model_name = filename.replace('_results.pkl', '') # Extract model name\n",
446
+ " file_path = os.path.join(results_dir, filename)\n",
447
+ " \n",
448
+ " # Load the result\n",
449
+ " with open(file_path, 'rb') as f:\n",
450
+ " result = pickle.load(f)\n",
451
+ " all_results[model_name] = result\n",
452
  "\n",
453
+ "# Now you can compare performance across models\n",
454
+ "for model, data in all_results.items():\n",
455
+ " print(f\"Model: {model}\")\n",
456
+ " print(f\"Performance Summary:\\n{data['perf_summary']}\")\n",
457
+ " print(\"-\" * 40)\n",
458
+ "\n",
459
+ "\n",
460
+ "# Now you can compare performance across models\n",
461
+ "for model, data in all_results.items():\n",
462
+ " print(f\"Model: {model}\")\n",
463
+ " print(f\"Performance Summary:\\n{data['perf_superclass']}\")\n",
464
+ " print(\"-\" * 40)"
465
  ]
466
  },
467
  {
468
  "cell_type": "markdown",
469
  "id": "17483df4-55c4-41cd-b8a9-61f7a5c7e8a3",
space/space/space/space/space/space/space/.DS_Store ADDED
Binary file (6.15 kB).
 
space/space/space/space/space/space/space/Candidate labels in Word en idee.docx ADDED
Binary file (21.1 kB).
 
space/space/space/space/space/space/space/gradio_tryout.ipynb CHANGED
@@ -8,59 +8,359 @@
8
  "# Try out gradio"
9
  ]
10
  },
11
  {
12
  "cell_type": "markdown",
13
  "id": "3bbee2e4-55c8-4b06-9929-72026edf7932",
14
  "metadata": {},
15
  "source": [
16
- "Try model"
17
  ]
18
  },
19
  {
20
  "cell_type": "code",
21
- "execution_count": 1,
22
- "id": "fa0d8126-e346-4412-9197-7d51baf868da",
23
  "metadata": {
24
  "scrolled": true
25
  },
26
  "outputs": [
27
  {
28
- "name": "stderr",
29
  "output_type": "stream",
30
  "text": [
31
- "Some weights of BartForSequenceClassification were not initialized from the model checkpoint at facebook/bart-base and are newly initialized: ['classification_head.dense.bias', 'classification_head.dense.weight', 'classification_head.out_proj.bias', 'classification_head.out_proj.weight']\n",
32
- "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
33
- "Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.\n",
34
- "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to -1. Define a descriptive label2id mapping in the model config to ensure correct outputs.\n",
35
- "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
36
  ]
37
  },
38
  {
39
  "name": "stdout",
40
  "output_type": "stream",
41
  "text": [
42
- "{'sequence': 'I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.', 'labels': ['Map', 'Compass', 'Laptop', 'Car charger', 'Toys', 'Travel crib', 'Hat', 'Playlist', 'Stroller', 'Currency', 'Travel adapter', 'Hostel lock', 'Pen', 'Charging cables', 'Flip-flops', 'Pacifier', 'Camping stove', 'Multi-tool', 'Passport', 'Poncho', 'Hiking boots', 'Portable charger', 'Power bank', 'Trekking poles', 'Snowboard', 'Base layers', 'Bandana', 'Aloe vera gel', 'Gloves', 'Baby blanket', 'Tent', 'Tent', 'Snorkel gear', 'Water filter', 'Diapers', 'Presentation materials', 'Nursing cover', 'Headphones', 'Sunscreen', 'Beach towel', 'Snacks', 'Ski jacket', 'Earplugs', 'Ski goggles', 'Flashlight', 'Neck wallet', 'Swimsuit', 'Notebook', 'Thermal clothing', 'Blanket', 'Snow boots', 'Sleeping bag', 'Lightweight backpack', 'Refillable water bottle', 'Quick-dry towel', 'Comfortable shoes', 'Reusable shopping bags', 'Travel journal', 'Travel pillow', 'Beach bag', 'Reusable coffee mug', 'Reusable water bottle', 'Festival tickets', 'Waterproof phone case', 'Business attire', 'Sunglasses', 'Sunglasses', 'Cooler', 'Baby clothes', 'Fanny pack', 'Beanie', 'First aid kit', 'Emergency roadside kit', 'Dry bag', 'SIM card', 'Energy bars', 'Baby food', 'Work ID badge', 'Packable rain jacket', 'Hand warmers', 'Visa documents', 'Glow sticks', 'Bug spray', 'Travel-sized toiletries', 'Dress shoes', 'Language phrasebook', 'Baby wipes', 'Lip balm', 'Travel insurance documents'], 'scores': [0.013028442859649658, 0.012909057550132275, 0.0124660674482584, 0.012431488372385502, 0.012379261665046215, 0.012377972714602947, 0.012329353019595146, 0.012096051126718521, 0.012086767703294754, 0.011947661638259888, 0.011939236894249916, 0.011935302056372166, 0.011887168511748314, 0.011814153753221035, 0.011788924224674702, 0.011783207766711712, 0.01177265401929617, 0.011771135963499546, 0.011747810058295727, 0.011738969013094902, 0.01169698778539896, 0.01166312862187624, 0.011658026836812496, 0.011596457101404667, 0.01158847101032734, 0.011561167426407337, 0.011526867747306824, 0.01149983424693346, 0.011472185142338276, 0.011455104686319828, 0.011445573531091213, 0.011445573531091213, 0.011444379575550556, 0.011416648514568806, 0.01136692427098751, 0.011363024823367596, 0.011361461132764816, 0.011328471824526787, 0.011299548670649529, 0.011291779577732086, 0.011282541789114475, 0.01127372495830059, 0.011270811781287193, 0.011263585649430752, 0.011179029010236263, 0.011149592697620392, 0.01113132108002901, 0.011122703552246094, 0.011105425655841827, 0.011101326905190945, 0.011090466752648354, 0.011066330596804619, 0.011058374308049679, 0.011055233888328075, 0.01103114802390337, 0.011022195219993591, 0.011012199334800243, 0.01100123766809702, 0.010985593311488628, 0.010961917228996754, 0.010958753526210785, 0.010938071645796299, 0.010903625749051571, 0.010879918932914734, 0.010863620787858963, 0.010824359022080898, 0.010824359022080898, 0.010805793106555939, 0.010763236321508884, 0.010710005648434162, 0.010690474882721901, 0.010647830553352833, 0.010583569295704365, 0.010571518912911415, 0.010570857673883438, 0.010552200488746166, 0.0105352271348238, 0.010523369535803795, 0.010514546185731888, 0.010479346849024296, 0.010450395755469799, 0.010436479933559895, 0.01043587177991867, 0.010400519706308842, 0.010214710608124733, 0.010052643716335297, 0.010041419416666031, 0.010003888048231602, 0.009946384467184544]}\n"
43
  ]
44
  }
45
  ],
46
  "source": [
 
47
  "from transformers import pipeline\n",
48
  "import gradio as gr\n",
49
  "\n",
50
- "# Load the model and create a pipeline for zero-shot classification\n",
51
- "classifier = pipeline(\"zero-shot-classification\", model=\"facebook/bart-base\")\n",
52
- "\n",
53
- "# Load labels from a txt file\n",
54
- "with open(\"labels.txt\", \"r\", encoding=\"utf-8\") as f:\n",
55
- " class_labels = [line.strip() for line in f if line.strip()]\n",
56
  "\n",
57
- "# Example text to classify\n",
58
- "input_text = \"I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.\"\n",
59
  "\n",
60
- "# Perform classification\n",
61
- "result = classifier(input_text, class_labels)\n",
 
 
 
 
 
62
  "\n",
63
- "print(result)"
 
 
64
  ]
65
  },
66
  {
@@ -73,7 +373,7 @@
73
  },
74
  {
75
  "cell_type": "code",
76
- "execution_count": 2,
77
  "id": "521d9118-b59d-4cc6-b637-20202eaf8f33",
78
  "metadata": {
79
  "scrolled": true
@@ -83,15 +383,16 @@
83
  "name": "stdout",
84
  "output_type": "stream",
85
  "text": [
86
- "Running on local URL: http://127.0.0.1:7860\n",
 
87
  "\n",
88
- "To create a public link, set `share=True` in `launch()`.\n"
89
  ]
90
  },
91
  {
92
  "data": {
93
  "text/html": [
94
- "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
95
  ],
96
  "text/plain": [
97
  "<IPython.core.display.HTML object>"
@@ -116,50 +417,16 @@
116
  "\n",
117
  "# Launch the Gradio app\n",
118
  "if __name__ == \"__main__\":\n",
119
- " demo.launch()"
120
- ]
121
- },
122
- {
123
- "cell_type": "markdown",
124
- "id": "d6526d18-6ba6-4a66-8310-21337b832d84",
125
- "metadata": {},
126
- "source": [
127
- "Simple app"
128
  ]
129
  },
130
  {
131
  "cell_type": "code",
132
  "execution_count": null,
133
- "id": "5496ded9-7294-4da4-af05-00e5846cdd04",
134
  "metadata": {},
135
  "outputs": [],
136
- "source": [
137
- "import gradio as gr\n",
138
- "from transformers import pipeline\n",
139
- "\n",
140
- "# Initialize the zero-shot classification pipeline\n",
141
- "classifier = pipeline(\"zero-shot-classification\", model=\"facebook/bart-base\")\n",
142
- "\n",
143
- "# Define the classification function\n",
144
- "def classify_text(text, labels):\n",
145
- " labels = labels.split(\",\") # Convert the comma-separated string into a list\n",
146
- " result = classifier(text, candidate_labels=labels)\n",
147
- " return result\n",
148
- "\n",
149
- "# Set up the Gradio interface\n",
150
- "with gr.Blocks() as demo:\n",
151
- " gr.Markdown(\"# Zero-Shot Classification\")\n",
152
- " text_input = gr.Textbox(label=\"Input Text\")\n",
153
- " label_input = gr.Textbox(label=\"Comma-separated Labels\")\n",
154
- " output = gr.JSON(label=\"Result\")\n",
155
- " classify_button = gr.Button(\"Classify\")\n",
156
- "\n",
157
- " # Link the button to the classification function\n",
158
- " classify_button.click(classify_text, inputs=[text_input, label_input], outputs=output)\n",
159
- "\n",
160
- "# Launch the Gradio interface\n",
161
- "demo.launch()"
162
- ]
163
  }
164
  ],
165
  "metadata": {
 
8
  "# Try out gradio"
9
  ]
10
  },
11
+ {
12
+ "cell_type": "markdown",
13
+ "id": "afd23321-1870-44af-82ed-bb241d055dfa",
14
+ "metadata": {},
15
+ "source": []
16
+ },
17
  {
18
  "cell_type": "markdown",
19
  "id": "3bbee2e4-55c8-4b06-9929-72026edf7932",
20
  "metadata": {},
21
  "source": [
22
+ "**Load and try the model**"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": 16,
28
+ "id": "f8c28d2d-8458-49fd-8ebf-5e729d6e861f",
29
+ "metadata": {},
30
+ "outputs": [
31
+ {
32
+ "name": "stderr",
33
+ "output_type": "stream",
34
+ "text": [
35
+ "Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.\n"
36
+ ]
37
+ },
38
+ {
39
+ "name": "stdout",
40
+ "output_type": "stream",
41
+ "text": [
42
+ "First trip: I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands. \n",
43
+ "\n",
44
+ "Trip type: ['beach vacation', ['swimming', 'going to the beach', 'relaxing', 'hiking'], 'warm destination / summer', 'lightweight (but comfortable)', 'casual', 'indoor', 'no vehicle', 'no special conditions', '7 days']\n"
45
+ ]
46
+ }
47
+ ],
48
+ "source": [
49
+ "# Prerequisites\n",
50
+ "from tabulate import tabulate\n",
51
+ "from transformers import pipeline\n",
52
+ "import json\n",
53
+ "import pandas as pd\n",
54
+ "\n",
55
+ "# Load the model and create a pipeline for zero-shot classification (1min loading + classifying with 89 labels)\n",
56
+ "classifier = pipeline(\"zero-shot-classification\", model=\"facebook/bart-large-mnli\")\n",
57
+ "\n",
58
+ "# get candidate labels\n",
59
+ "with open(\"packing_label_structure.json\", \"r\") as file:\n",
60
+ " candidate_labels = json.load(file)\n",
61
+ "keys_list = list(candidate_labels.keys())\n",
62
+ "\n",
63
+ "# Load test data (in list of dictionaries)\n",
64
+ "with open(\"test_data.json\", \"r\") as file:\n",
65
+ " packing_data = json.load(file)\n",
66
+ "# Extract all trip descriptions and trip_types\n",
67
+ "trip_descriptions = [trip['description'] for trip in packing_data]\n",
68
+ "trip_types = [trip['trip_types'] for trip in packing_data]\n",
69
+ "\n",
70
+ "# Access the first trip description\n",
71
+ "first_trip = trip_descriptions[0]\n",
72
+ "# Get the packing list for the secondfirst trip\n",
73
+ "first_trip_type = trip_types[0]\n",
74
+ "\n",
75
+ "print(f\"First trip: {first_trip} \\n\")\n",
76
+ "print(f\"Trip type: {first_trip_type}\")"
77
  ]
78
  },
79
  {
80
  "cell_type": "code",
81
+ "execution_count": 37,
82
+ "id": "fed1f8bc-5baf-46e7-8763-9d56fb9c536b",
83
  "metadata": {
84
  "scrolled": true
85
  },
86
  "outputs": [
87
  {
88
+ "name": "stdout",
89
  "output_type": "stream",
90
  "text": [
91
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['beach vacation', 'micro-adventure / weekend trip', 'nature escape', 'digital nomad trip', 'cultural exploration', 'yoga / wellness retreat', 'festival trip', 'long-distance hike / thru-hike', 'hut trek (summer)', 'city trip', 'road trip (car/camper)', 'ski tour / skitour', 'camping trip (campground)', 'snowboard / splitboard trip', 'camping trip (wild camping)', 'hut trek (winter)'], 'scores': [0.37631064653396606, 0.35016775131225586, 0.13397355377674103, 0.031636204570531845, 0.031270742416381836, 0.012846449390053749, 0.012699575163424015, 0.009526746347546577, 0.008148356340825558, 0.007793044205754995, 0.006512156222015619, 0.005669699050486088, 0.0044484627433121204, 0.004113250877708197, 0.002713854657486081, 0.002169555053114891]}\n",
92
+ "beach vacation\n",
93
+ "0\n",
94
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['going to the beach', 'relaxing', 'hiking', 'swimming', 'sightseeing', 'running', 'hut-to-hut hiking', 'biking', 'photography', 'surfing', 'stand-up paddleboarding (SUP)', 'snorkeling', 'yoga', 'kayaking / canoeing', 'rock climbing', 'fishing', 'paragliding', 'rafting', 'horseback riding', 'snowshoe hiking', 'cross-country skiing', 'ice climbing', 'skiing', 'scuba diving', 'ski touring'], 'scores': [0.9914858341217041, 0.9771362543106079, 0.9426282048225403, 0.21901991963386536, 0.17586199939250946, 0.09854521602392197, 0.08370419591665268, 0.03679152950644493, 0.03668990358710289, 0.03099300153553486, 0.025300050154328346, 0.021451234817504883, 0.011070131324231625, 0.0075112744234502316, 0.006306737195700407, 0.0034973458386957645, 0.002655829070135951, 0.00197031581774354, 0.0015599008183926344, 0.001527810120023787, 0.0015017405385151505, 0.0014336870517581701, 0.0011686616344377398, 0.000789369223639369, 0.0004912536824122071]}\n",
95
+ "['going to the beach', 'relaxing', 'hiking']\n",
96
+ "1\n",
97
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['warm destination / summer', 'tropical / humid', 'variable weather / spring / autumn', 'dry / desert-like', 'cold destination / winter'], 'scores': [0.6468702554702759, 0.19999535381793976, 0.09394325315952301, 0.05279730260372162, 0.0063938056118786335]}\n",
98
+ "warm destination / summer\n",
99
+ "2\n",
100
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['minimalist', 'ultralight', 'lightweight (but comfortable)', 'luxury (including evening wear)'], 'scores': [0.4286234974861145, 0.2564568817615509, 0.2147122174501419, 0.10020739585161209]}\n",
101
+ "minimalist\n",
102
+ "3\n",
103
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['casual', 'conservative', 'formal (business trip)'], 'scores': [0.6567223072052002, 0.3034382164478302, 0.039839524775743484]}\n",
104
+ "casual\n",
105
+ "4\n",
106
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['huts with half board', 'indoor', 'sleeping in a car', 'sleeping in a tent'], 'scores': [0.5007699728012085, 0.34074831008911133, 0.10416240990161896, 0.05431929975748062]}\n",
107
+ "huts with half board\n",
108
+ "5\n",
109
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['vehicle', 'no vehicle'], 'scores': [0.7521055936813354, 0.24789436161518097]}\n",
110
+ "vehicle\n",
111
+ "6\n",
112
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['off-grid / no electricity', 'self-supported (bring your own food/cooking)', 'no special conditions', 'pet-friendly', 'rainy climate', 'child-friendly', 'snow and ice', 'high alpine terrain', 'avalanche-prone terrain'], 'scores': [0.46220096945762634, 0.12957870960235596, 0.10651793330907822, 0.09777138382196426, 0.06722460687160492, 0.0632496327161789, 0.04952802509069443, 0.015049820765852928, 0.008878983557224274]}\n",
113
+ "off-grid / no electricity\n",
114
+ "7\n",
115
+ "{'sequence': 'I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands.', 'labels': ['7+ days', '2 days', '1 day', '6 days', '3 days', '4 days', '7 days', '5 days'], 'scores': [0.21139054000377655, 0.18512114882469177, 0.14520084857940674, 0.0976138487458229, 0.094282366335392, 0.09376301616430283, 0.09161651134490967, 0.08101171255111694]}\n",
116
+ "7+ days\n",
117
+ "8\n"
118
+ ]
119
+ }
120
+ ],
121
+ "source": [
122
+ "# Create an empty DataFrame with specified columns\n",
123
+ "df = pd.DataFrame(columns=['superclass', 'pred_class'])\n",
124
+ "cutoff = 0.5 # used to choose which activities are relevant\n",
125
+ "\n",
126
+ "# fill DataFrame\n",
127
+ "for i, key in enumerate(keys_list):\n",
128
+ " # Run the classification (ca 30 seconds classifying)\n",
129
+ " if key == 'activities':\n",
130
+ " result = classifier(first_trip, candidate_labels[key], multi_label=True)\n",
131
+ " indices = [i for i, score in enumerate(result['scores']) if score > cutoff]\n",
132
+ " classes = [result['labels'][i] for i in indices]\n",
133
+ " else:\n",
134
+ " result = classifier(first_trip, candidate_labels[key])\n",
135
+ " classes = result[\"labels\"][0]\n",
136
+ " print(result)\n",
137
+ " print(classes)\n",
138
+ " print(i)\n",
139
+ " df.loc[i] = [key, classes]\n",
140
+ "\n",
141
+ "df['true_class'] = first_trip_type"
142
+ ]
143
+ },
144
+ {
145
+ "cell_type": "code",
146
+ "execution_count": 40,
147
+ "id": "b3b51280-76a1-4229-a9de-070b925d3463",
148
+ "metadata": {
149
+ "scrolled": true
150
+ },
151
+ "outputs": [
152
+ {
153
+ "name": "stdout",
154
+ "output_type": "stream",
155
+ "text": [
156
+ " Superclass pred_class true_class\n",
157
+ "0 activity_type beach vacation beach vacation\n",
158
+ "1 activities [going to the beach, relaxing, hiking] [swimming, going to the beach, relaxing, hiking]\n",
159
+ "2 climate_or_season warm destination / summer warm destination / summer\n",
160
+ "3 style_or_comfort minimalist lightweight (but comfortable)\n",
161
+ "4 dress_code casual casual\n",
162
+ "5 accommodation huts with half board indoor\n",
163
+ "6 transportation vehicle no vehicle\n",
164
+ "7 special_conditions off-grid / no electricity no special conditions\n",
165
+ "8 trip_length_days 7+ days 7 days\n"
166
+ ]
167
+ }
168
+ ],
169
+ "source": [
170
+ "pd.set_option('display.width', 1000) \n",
171
+ "pd.set_option('display.max_columns', None)\n",
172
+ "print(df)"
173
+ ]
174
+ },
175
+ {
176
+ "cell_type": "code",
177
+ "execution_count": 42,
178
+ "id": "2ec09e8f-75f5-45b1-b4c0-4fafd685d36b",
179
+ "metadata": {},
180
+ "outputs": [
181
+ {
182
+ "name": "stdout",
183
+ "output_type": "stream",
184
+ "text": [
185
+ " Superclass pred_class true_class same\n",
186
+ "0 activity_type beach vacation beach vacation True\n",
187
+ "1 activities [going to the beach, relaxing, hiking] [swimming, going to the beach, relaxing, hiking] False\n",
188
+ "2 climate_or_season warm destination / summer warm destination / summer True\n",
189
+ "3 style_or_comfort minimalist lightweight (but comfortable) False\n",
190
+ "4 dress_code casual casual True\n",
191
+ "5 accommodation huts with half board indoor False\n",
192
+ "6 transportation vehicle no vehicle False\n",
193
+ "7 special_conditions off-grid / no electricity no special conditions False\n",
194
+ "8 trip_length_days 7+ days 7 days False\n"
195
+ ]
196
+ }
197
+ ],
198
+ "source": [
199
+ "df['same'] = df['pred_class'] == df['true_class']\n",
200
+ "print(df)"
201
+ ]
202
+ },
203
+ {
204
+ "cell_type": "code",
205
+ "execution_count": 62,
206
+ "id": "82ae19c8-8bb7-4f7f-841b-1cb6501a17a7",
207
+ "metadata": {},
208
+ "outputs": [
209
+ {
210
+ "name": "stdout",
211
+ "output_type": "stream",
212
+ "text": [
213
+ "Accuracy (excluding activities): 0.3333333333333333\n"
214
+ ]
215
+ }
216
+ ],
217
+ "source": [
218
+ "# accuracy excluding activities\n",
219
+ "correct = sum(df.loc[df.index != 1, 'same'])\n",
220
+ "total = len(df['same'])\n",
221
+ "accuracy = correct/total\n",
222
+ "print(\"Accuracy (excluding activities):\", accuracy)"
223
+ ]
224
+ },
225
+ {
226
+ "cell_type": "code",
227
+ "execution_count": 64,
228
+ "id": "16c0a3ae-34ac-49a4-b59f-411a6f0ce947",
229
+ "metadata": {},
230
+ "outputs": [
231
+ {
232
+ "name": "stdout",
233
+ "output_type": "stream",
234
+ "text": [
235
+ "Percentage of true classes that were identified: 0.75\n",
236
+ "Percentage of predicted classes that were wrong: 0.0\n"
237
+ ]
238
+ }
239
+ ],
240
+ "source": [
241
+ "pred_class = df.loc[df.index == 1, 'pred_class'].iloc[0]\n",
242
+ "true_class = df.loc[df.index == 1, 'true_class'].iloc[0]\n",
243
+ "correct = [label for label in pred_class if label in true_class]\n",
244
+ "\n",
245
+ "num_correct = len(correct)\n",
246
+ "correct_perc = num_correct/len(true_class)\n",
247
+ "\n",
248
+ "num_pred = len(pred_class)\n",
249
+ "wrong_perc = (num_pred - num_correct)/num_pred\n",
250
+ "\n",
251
+ "print(\"Percentage of true classes that were identified:\", correct_perc)\n",
252
+ "print(\"Percentage of predicted classes that were wrong:\", wrong_perc)"
253
+ ]
254
+ },
255
+ {
256
+ "cell_type": "markdown",
257
+ "id": "62c5c18c-58f4-465c-a188-c57cfa7ffa90",
258
+ "metadata": {},
259
+ "source": [
260
+ "Now do the same for all trips"
261
+ ]
262
+ },
263
+ {
264
+ "cell_type": "code",
265
+ "execution_count": null,
266
+ "id": "e4192b42-f1bc-4fcb-a238-dbdb3df7d699",
267
+ "metadata": {},
268
+ "outputs": [],
269
+ "source": []
270
+ },
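The code cell above was left empty in this notebook. As a rough sketch of "doing the same for all trips", the loop below reuses the `classifier`, `candidate_labels`, `keys_list`, `cutoff`, `trip_descriptions` and `trip_types` objects defined in the earlier cells of this notebook; the helper name `classify_trip` is illustrative and not part of the original notebook.

```python
import pandas as pd

def classify_trip(text: str) -> pd.DataFrame:
    """Predict one label per superclass (a list of labels for 'activities')."""
    rows = []
    for key in keys_list:
        if key == 'activities':
            result = classifier(text, candidate_labels[key], multi_label=True)
            classes = [label for label, score in zip(result['labels'], result['scores'])
                       if score > cutoff]
        else:
            result = classifier(text, candidate_labels[key])
            classes = result['labels'][0]
        rows.append({'superclass': key, 'pred_class': classes})
    return pd.DataFrame(rows)

# Classify every test trip and keep the true labels alongside the predictions
all_trip_results = []
for description, true_labels in zip(trip_descriptions, trip_types):
    df_trip = classify_trip(description)
    df_trip['true_class'] = true_labels
    df_trip['same'] = df_trip['pred_class'] == df_trip['true_class']
    all_trip_results.append(df_trip)
```

Each entry of `all_trip_results` can then be scored the same way as the single-trip example above.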
271
+ {
272
+ "cell_type": "markdown",
273
+ "id": "17483df4-55c4-41cd-b8a9-61f7a5c7e8a3",
274
+ "metadata": {},
275
+ "source": [
276
+ "**Use gradio for user input**"
277
+ ]
278
+ },
279
+ {
280
+ "cell_type": "code",
281
+ "execution_count": 66,
282
+ "id": "cb7fd425-d0d6-458d-97ca-2150dc55f206",
283
+ "metadata": {},
284
+ "outputs": [
285
+ {
286
+ "name": "stdout",
287
+ "output_type": "stream",
288
+ "text": [
289
+ "Running on local URL: http://127.0.0.1:7861\n",
290
+ "Running on public URL: https://aa06d5d85ffadaa92b.gradio.live\n",
291
+ "\n",
292
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
293
  ]
294
  },
295
+ {
296
+ "data": {
297
+ "text/html": [
298
+ "<div><iframe src=\"https://aa06d5d85ffadaa92b.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
299
+ ],
300
+ "text/plain": [
301
+ "<IPython.core.display.HTML object>"
302
+ ]
303
+ },
304
+ "metadata": {},
305
+ "output_type": "display_data"
306
+ },
307
  {
308
  "name": "stdout",
309
  "output_type": "stream",
310
  "text": [
311
+ "0\n",
312
+ "1\n",
313
+ "2\n",
314
+ "3\n",
315
+ "4\n",
316
+ "5\n",
317
+ "6\n",
318
+ "7\n",
319
+ "8\n",
320
+ "0\n",
321
+ "1\n",
322
+ "2\n",
323
+ "3\n",
324
+ "4\n",
325
+ "5\n",
326
+ "6\n",
327
+ "7\n",
328
+ "8\n"
329
  ]
330
  }
331
  ],
332
  "source": [
333
+ "# use model with gradio\n",
334
  "from transformers import pipeline\n",
335
  "import gradio as gr\n",
336
  "\n",
337
+ "# make a function for what I am doing\n",
338
+ "def classify(text):\n",
339
+ " df = pd.DataFrame(columns=['Superclass', 'class'])\n",
340
+ " for i, key in enumerate(keys_list):\n",
341
+ " # Run the classification (ca 30 seconds classifying)\n",
342
+ " if key == 'activities':\n",
343
+ " result = classifier(text, candidate_labels[key], multi_label=True)\n",
344
+ " classes = [result['labels'][i] for i in indices]\n",
345
+ " else:\n",
346
+ " result = classifier(text, candidate_labels[key])\n",
347
+ " classes = result[\"labels\"][0]\n",
348
+ " print(i)\n",
349
+ " df.loc[i] = [key, classes]\n",
350
  "\n",
351
+ " return df\n",
 
352
  "\n",
353
+ "demo = gr.Interface(\n",
354
+ " fn=classify,\n",
355
+ " inputs=\"text\",\n",
356
+ " outputs=\"dataframe\",\n",
357
+ " title=\"Zero-Shot Classification\",\n",
358
+ " description=\"Enter a text describing your trip\",\n",
359
+ ")\n",
360
  "\n",
361
+ "# Launch the Gradio app\n",
362
+ "if __name__ == \"__main__\":\n",
363
+ " demo.launch(share=True)"
364
  ]
365
  },
366
  {
 
373
  },
374
  {
375
  "cell_type": "code",
376
+ "execution_count": 4,
377
  "id": "521d9118-b59d-4cc6-b637-20202eaf8f33",
378
  "metadata": {
379
  "scrolled": true
 
383
  "name": "stdout",
384
  "output_type": "stream",
385
  "text": [
386
+ "Running on local URL: http://127.0.0.1:7861\n",
387
+ "Running on public URL: https://0f70ba5369d721cf8f.gradio.live\n",
388
  "\n",
389
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
390
  ]
391
  },
392
  {
393
  "data": {
394
  "text/html": [
395
+ "<div><iframe src=\"https://0f70ba5369d721cf8f.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
396
  ],
397
  "text/plain": [
398
  "<IPython.core.display.HTML object>"
 
417
  "\n",
418
  "# Launch the Gradio app\n",
419
  "if __name__ == \"__main__\":\n",
420
+ " demo.launch(share=True)"
421
  ]
422
  },
423
  {
424
  "cell_type": "code",
425
  "execution_count": null,
426
+ "id": "c8da1c90-d3a3-4b08-801c-b3afa17b2633",
427
  "metadata": {},
428
  "outputs": [],
429
+ "source": []
430
  }
431
  ],
432
  "metadata": {
space/space/space/space/space/space/space/packing_label_hierarchical_mapping.json ADDED
@@ -0,0 +1,290 @@
1
+ {
2
+ "ski touring": {
3
+ "category": "activities",
4
+ "superclass": "winter sport"
5
+ },
6
+ "cross-country skiing": {
7
+ "category": "activities",
8
+ "superclass": "winter sport"
9
+ },
10
+ "skiing": {
11
+ "category": "activities",
12
+ "superclass": "winter sport"
13
+ },
14
+ "snowboard / splitboard trip": {
15
+ "category": "activity_type",
16
+ "superclass": "winter sport"
17
+ },
18
+ "snowshoe hiking": {
19
+ "category": "activities",
20
+ "superclass": "winter sport"
21
+ },
22
+ "ice climbing": {
23
+ "category": "activities",
24
+ "superclass": "winter sport"
25
+ },
26
+ "hiking": {
27
+ "category": "activities",
28
+ "superclass": "outdoor land"
29
+ },
30
+ "hut-to-hut hiking": {
31
+ "category": "activities",
32
+ "superclass": "outdoor land"
33
+ },
34
+ "huttentocht (zomer)": {
35
+ "category": "activity_type",
36
+ "superclass": "outdoor land"
37
+ },
38
+ "huttentocht (winter)": {
39
+ "category": "activity_type",
40
+ "superclass": "winter sport"
41
+ },
42
+ "rock climbing": {
43
+ "category": "activities",
44
+ "superclass": "outdoor land"
45
+ },
46
+ "biking": {
47
+ "category": "activities",
48
+ "superclass": "outdoor land"
49
+ },
50
+ "running": {
51
+ "category": "activities",
52
+ "superclass": "fitness"
53
+ },
54
+ "swimming": {
55
+ "category": "activities",
56
+ "superclass": "water sport"
57
+ },
58
+ "kayaking / canoeing": {
59
+ "category": "activities",
60
+ "superclass": "water sport"
61
+ },
62
+ "stand-up paddleboarding (SUP)": {
63
+ "category": "activities",
64
+ "superclass": "water sport"
65
+ },
66
+ "snorkeling": {
67
+ "category": "activities",
68
+ "superclass": "water sport"
69
+ },
70
+ "scuba diving": {
71
+ "category": "activities",
72
+ "superclass": "water sport"
73
+ },
74
+ "surfing": {
75
+ "category": "activities",
76
+ "superclass": "water sport"
77
+ },
78
+ "fishing": {
79
+ "category": "activities",
80
+ "superclass": "water sport"
81
+ },
82
+ "rafting": {
83
+ "category": "activities",
84
+ "superclass": "water sport"
85
+ },
86
+ "paragliding": {
87
+ "category": "activities",
88
+ "superclass": "adventure sport"
89
+ },
90
+ "horseback riding": {
91
+ "category": "activities",
92
+ "superclass": "outdoor land"
93
+ },
94
+ "photography": {
95
+ "category": "activities",
96
+ "superclass": "leisure"
97
+ },
98
+ "relaxing": {
99
+ "category": "activities",
100
+ "superclass": "leisure"
101
+ },
102
+ "sightseeing": {
103
+ "category": "activities",
104
+ "superclass": "cultural"
105
+ },
106
+ "yoga": {
107
+ "category": "activities",
108
+ "superclass": "wellness"
109
+ },
110
+ "festivaltrip": {
111
+ "category": "activity_type",
112
+ "superclass": "urban / event"
113
+ },
114
+ "stedentrip": {
115
+ "category": "activity_type",
116
+ "superclass": "urban / cultural"
117
+ },
118
+ "roadtrip (auto/camper)": {
119
+ "category": "activity_type",
120
+ "superclass": "travel mode"
121
+ },
122
+ "digitale nomade reis": {
123
+ "category": "activity_type",
124
+ "superclass": "remote work"
125
+ },
126
+ "kampeerreis (wildkamperen)": {
127
+ "category": "activity_type",
128
+ "superclass": "outdoor overnight"
129
+ },
130
+ "kampeerreis (camping)": {
131
+ "category": "activity_type",
132
+ "superclass": "outdoor overnight"
133
+ },
134
+ "langeafstandswandeling / thru-hike": {
135
+ "category": "activity_type",
136
+ "superclass": "backpacking"
137
+ },
138
+ "microavontuur / weekendtrip": {
139
+ "category": "activity_type",
140
+ "superclass": "short trip"
141
+ },
142
+ "yoga / wellness retreat": {
143
+ "category": "activity_type",
144
+ "superclass": "wellness"
145
+ },
146
+ "strandvakantie": {
147
+ "category": "activity_type",
148
+ "superclass": "beach / leisure"
149
+ },
150
+ "warme bestemming / zomer": {
151
+ "category": "climate_or_season",
152
+ "superclass": "warm"
153
+ },
154
+ "koude bestemming / winter": {
155
+ "category": "climate_or_season",
156
+ "superclass": "cold"
157
+ },
158
+ "wisselvallig / lente / herfst": {
159
+ "category": "climate_or_season",
160
+ "superclass": "mild"
161
+ },
162
+ "tropisch / vochtig": {
163
+ "category": "climate_or_season",
164
+ "superclass": "humid"
165
+ },
166
+ "droog / woestijnachtig": {
167
+ "category": "climate_or_season",
168
+ "superclass": "dry"
169
+ },
170
+ "ultralight": {
171
+ "category": "style_or_comfort",
172
+ "superclass": "packing style"
173
+ },
174
+ "lichtgewicht (maar comfortabel)": {
175
+ "category": "style_or_comfort",
176
+ "superclass": "packing style"
177
+ },
178
+ "luxe (inclusief avondkleding)": {
179
+ "category": "style_or_comfort",
180
+ "superclass": "comfort / luxury"
181
+ },
182
+ "casual": {
183
+ "category": "dress_code",
184
+ "superclass": "informal"
185
+ },
186
+ "formeel (zakelijke reis)": {
187
+ "category": "dress_code",
188
+ "superclass": "formal"
189
+ },
190
+ "conservative": {
191
+ "category": "dress_code",
192
+ "superclass": "cultural"
193
+ },
194
+ "minimalistisch": {
195
+ "category": "style_or_comfort",
196
+ "superclass": "packing style"
197
+ },
198
+ "off-grid / geen stroom": {
199
+ "category": "special_conditions",
200
+ "superclass": "independent"
201
+ },
202
+ "self-supported (eten/koken zelf meenemen)": {
203
+ "category": "special_conditions",
204
+ "superclass": "independent"
205
+ },
206
+ "regenachtig klimaat": {
207
+ "category": "special_conditions",
208
+ "superclass": "weather"
209
+ },
210
+ "sneeuw en ijs": {
211
+ "category": "special_conditions",
212
+ "superclass": "weather"
213
+ },
214
+ "hoog alpine terrein": {
215
+ "category": "special_conditions",
216
+ "superclass": "terrain"
217
+ },
218
+ "lawinegevoelig terrein": {
219
+ "category": "special_conditions",
220
+ "superclass": "terrain"
221
+ },
222
+ "hutten met halfpension": {
223
+ "category": "accommodation",
224
+ "superclass": "type"
225
+ },
226
+ "slapen in tent": {
227
+ "category": "accommodation",
228
+ "superclass": "type"
229
+ },
230
+ "slapen in auto": {
231
+ "category": "accommodation",
232
+ "superclass": "type"
233
+ },
234
+ "kindvriendelijk": {
235
+ "category": "special_conditions",
236
+ "superclass": "target group"
237
+ },
238
+ "huisdiervriendelijk": {
239
+ "category": "special_conditions",
240
+ "superclass": "target group"
241
+ },
242
+ "accommodation: outdoor": {
243
+ "category": "accommodation",
244
+ "superclass": "type"
245
+ },
246
+ "accommodation: indoor": {
247
+ "category": "accommodation",
248
+ "superclass": "type"
249
+ },
250
+ "transportation: vehicle": {
251
+ "category": "transportation",
252
+ "superclass": "mode"
253
+ },
254
+ "transportation: no vehicle": {
255
+ "category": "transportation",
256
+ "superclass": "mode"
257
+ },
258
+ "1 day": {
259
+ "category": "trip_length_days",
260
+ "superclass": "duration"
261
+ },
262
+ "2 days": {
263
+ "category": "trip_length_days",
264
+ "superclass": "duration"
265
+ },
266
+ "3 days": {
267
+ "category": "trip_length_days",
268
+ "superclass": "duration"
269
+ },
270
+ "4 days": {
271
+ "category": "trip_length_days",
272
+ "superclass": "duration"
273
+ },
274
+ "5 days": {
275
+ "category": "trip_length_days",
276
+ "superclass": "duration"
277
+ },
278
+ "6 days": {
279
+ "category": "trip_length_days",
280
+ "superclass": "duration"
281
+ },
282
+ "7 days": {
283
+ "category": "trip_length_days",
284
+ "superclass": "duration"
285
+ },
286
+ "7+ days": {
287
+ "category": "trip_length_days",
288
+ "superclass": "duration"
289
+ }
290
+ }
space/space/space/space/space/space/space/packing_label_structure.json ADDED
@@ -0,0 +1,96 @@
1
+ {
2
+ "activity_type": [
3
+ "hut trek (summer)",
4
+ "hut trek (winter)",
5
+ "camping trip (wild camping)",
6
+ "camping trip (campground)",
7
+ "ski tour / skitour",
8
+ "snowboard / splitboard trip",
9
+ "long-distance hike / thru-hike",
10
+ "digital nomad trip",
11
+ "city trip",
12
+ "road trip (car/camper)",
13
+ "festival trip",
14
+ "yoga / wellness retreat",
15
+ "micro-adventure / weekend trip",
16
+ "beach vacation",
17
+ "cultural exploration",
18
+ "nature escape"
19
+ ],
20
+ "activities": [
21
+ "swimming",
22
+ "going to the beach",
23
+ "relaxing",
24
+ "sightseeing",
25
+ "biking",
26
+ "running",
27
+ "skiing",
28
+ "cross-country skiing",
29
+ "ski touring",
30
+ "hiking",
31
+ "hut-to-hut hiking",
32
+ "rock climbing",
33
+ "ice climbing",
34
+ "snowshoe hiking",
35
+ "kayaking / canoeing",
36
+ "stand-up paddleboarding (SUP)",
37
+ "snorkeling",
38
+ "scuba diving",
39
+ "surfing",
40
+ "paragliding",
41
+ "horseback riding",
42
+ "photography",
43
+ "fishing",
44
+ "rafting",
45
+ "yoga"
46
+ ],
47
+ "climate_or_season": [
48
+ "cold destination / winter",
49
+ "warm destination / summer",
50
+ "variable weather / spring / autumn",
51
+ "tropical / humid",
52
+ "dry / desert-like"
53
+ ],
54
+ "style_or_comfort": [
55
+ "ultralight",
56
+ "lightweight (but comfortable)",
57
+ "luxury (including evening wear)",
58
+ "minimalist"
59
+ ],
60
+ "dress_code": [
61
+ "casual",
62
+ "formal (business trip)",
63
+ "conservative"
64
+ ],
65
+ "accommodation": [
66
+ "indoor",
67
+ "huts with half board",
68
+ "sleeping in a tent",
69
+ "sleeping in a car"
70
+ ],
71
+ "transportation": [
72
+ "vehicle",
73
+ "no vehicle"
74
+ ],
75
+ "special_conditions": [
76
+ "off-grid / no electricity",
77
+ "self-supported (bring your own food/cooking)",
78
+ "child-friendly",
79
+ "pet-friendly",
80
+ "rainy climate",
81
+ "snow and ice",
82
+ "high alpine terrain",
83
+ "avalanche-prone terrain",
84
+ "no special conditions"
85
+ ],
86
+ "trip_length_days": [
87
+ "1 day",
88
+ "2 days",
89
+ "3 days",
90
+ "4 days",
91
+ "5 days",
92
+ "6 days",
93
+ "7 days",
94
+ "7+ days"
95
+ ]
96
+ }
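Editor's note: packing_label_structure.json keeps the candidate labels short by splitting them into one list per category, so a zero-shot classifier can be run once per category instead of once over all labels. A rough sketch of that usage follows (not part of the commit; the model choice and variable names are assumptions, and it assumes `transformers` is installed):

# Illustrative sketch: classify a trip description against each category separately.
import json
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

with open("packing_label_structure.json", "r", encoding="utf-8") as f:
    label_structure = json.load(f)

trip = ("I like to cycle and I burn easily. I also love culture and like to "
        "post on social media about my food. I will go on a trip to Italy in July.")

predictions = {}
for category, candidate_labels in label_structure.items():
    result = classifier(trip, candidate_labels)
    predictions[category] = result["labels"][0]  # top-ranked label per category

print(predictions)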
space/space/space/space/space/space/space/packing_list_api.ipynb CHANGED
@@ -59,61 +59,6 @@
59
  "print(output)\n"
60
  ]
61
  },
62
- {
63
- "cell_type": "markdown",
64
- "id": "fb7e69c7-b590-4b40-8478-76d055583f2a",
65
- "metadata": {},
66
- "source": [
67
- "**Try packing list labels**"
68
- ]
69
- },
70
- {
71
- "cell_type": "code",
72
- "execution_count": 4,
73
- "id": "c5f75916-aaf2-4ca7-8d1a-070579940952",
74
- "metadata": {},
75
- "outputs": [
76
- {
77
- "name": "stdout",
78
- "output_type": "stream",
79
- "text": [
80
- "{'error': ['Error in `parameters.candidate_labels`: ensure this value has at most 10 items']}\n"
81
- ]
82
- }
83
- ],
84
- "source": [
85
- "# Input text to classify\n",
86
- "input_text = \"I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.\"\n",
87
- "\n",
88
- "# Candidate labels\n",
89
- "candidate_labels = [\n",
90
- " \"Swimsuit\", \"Sunscreen\", \"Flip-flops\", \"Beach towel\", \"Sunglasses\", \n",
91
- " \"Waterproof phone case\", \"Hat\", \"Beach bag\", \"Snorkel gear\", \"Aloe vera gel\",\n",
92
- " \"Tent\", \"Sleeping bag\", \"Camping stove\", \"Flashlight\", \"Hiking boots\",\n",
93
- " \"Water filter\", \"Compass\", \"First aid kit\", \"Bug spray\", \"Multi-tool\",\n",
94
- " \"Thermal clothing\", \"Ski jacket\", \"Ski goggles\", \"Snow boots\", \"Gloves\",\n",
95
- " \"Hand warmers\", \"Beanie\", \"Lip balm\", \"Snowboard\", \"Base layers\",\n",
96
- " \"Passport\", \"Visa documents\", \"Travel adapter\", \"Currency\", \"Language phrasebook\",\n",
97
- " \"SIM card\", \"Travel pillow\", \"Neck wallet\", \"Travel insurance documents\", \"Power bank\",\n",
98
- " \"Laptop\", \"Notebook\", \"Business attire\", \"Dress shoes\", \"Charging cables\",\n",
99
- " \"Presentation materials\", \"Work ID badge\", \"Pen\", \"Headphones\", \n",
100
- " \"Lightweight backpack\", \"Travel-sized toiletries\", \"Packable rain jacket\",\n",
101
- " \"Reusable water bottle\", \"Dry bag\", \"Trekking poles\", \"Hostel lock\", \"Quick-dry towel\",\n",
102
- " \"Travel journal\", \"Energy bars\", \"Car charger\", \"Snacks\", \"Map\",\n",
103
- " \"Sunglasses\", \"Cooler\", \"Blanket\", \"Emergency roadside kit\", \"Reusable coffee mug\",\n",
104
- " \"Playlist\", \"Reusable shopping bags\", \"Earplugs\", \"Fanny pack\", \"Portable charger\",\n",
105
- " \"Poncho\", \"Bandana\", \"Comfortable shoes\", \"Tent\", \"Refillable water bottle\",\n",
106
- " \"Glow sticks\", \"Festival tickets\", \"Diapers\", \"Baby wipes\", \"Baby food\",\n",
107
- " \"Stroller\", \"Pacifier\", \"Baby clothes\", \"Baby blanket\", \"Travel crib\",\n",
108
- " \"Toys\", \"Nursing cover\"\n",
109
- "]\n",
110
- "\n",
111
- "\n",
112
- "# Get the prediction\n",
113
- "output = query({\"inputs\": input_text, \"parameters\": {\"candidate_labels\": candidate_labels}})\n",
114
- "print(output)"
115
- ]
116
- },
117
  {
118
  "cell_type": "markdown",
119
  "id": "edf44387-d166-4e0f-a8ad-621230aee115",
@@ -158,7 +103,7 @@
158
  "trips = list(packing_data.keys())\n",
159
  "# Access the first trip description\n",
160
  "first_trip = trips[0]\n",
161
- "# Get the packing list for the second trip\n",
162
  "first_trip_items = packing_data[first_trip]\n",
163
  "\n",
164
  "print(f\"First trip: {first_trip} \\n\")\n",
@@ -243,7 +188,7 @@
243
  },
244
  {
245
  "cell_type": "code",
246
- "execution_count": null,
247
  "id": "116c7ee3-2b59-4623-a416-162c487aab70",
248
  "metadata": {},
249
  "outputs": [],
@@ -254,10 +199,204 @@
254
  },
255
  {
256
  "cell_type": "code",
257
- "execution_count": null,
258
  "id": "8591425b-ce55-4a36-a4b6-70974e8d4e59",
259
  "metadata": {},
260
- "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
  "source": [
262
  "# Creating a table\n",
263
  "table = zip(result_bart_base[\"labels\"], \n",
 
59
  "print(output)\n"
60
  ]
61
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  {
63
  "cell_type": "markdown",
64
  "id": "edf44387-d166-4e0f-a8ad-621230aee115",
 
103
  "trips = list(packing_data.keys())\n",
104
  "# Access the first trip description\n",
105
  "first_trip = trips[0]\n",
106
+ "# Get the packing list for the secondfirst trip\n",
107
  "first_trip_items = packing_data[first_trip]\n",
108
  "\n",
109
  "print(f\"First trip: {first_trip} \\n\")\n",
 
188
  },
189
  {
190
  "cell_type": "code",
191
+ "execution_count": 39,
192
  "id": "116c7ee3-2b59-4623-a416-162c487aab70",
193
  "metadata": {},
194
  "outputs": [],
 
199
  },
200
  {
201
  "cell_type": "code",
202
+ "execution_count": 40,
203
  "id": "8591425b-ce55-4a36-a4b6-70974e8d4e59",
204
  "metadata": {},
205
+ "outputs": [
206
+ {
207
+ "name": "stdout",
208
+ "output_type": "stream",
209
+ "text": [
210
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
211
+ "| bart_base | bart_large_mnli |\n",
212
+ "+======================================================================+======================================================================+\n",
213
+ "| bandana | travel adapter |\n",
214
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
215
+ "| lip balm | travel journal |\n",
216
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
217
+ "| hand warmers | light jacket |\n",
218
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
219
+ "| sim card | sim card |\n",
220
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
221
+ "| neck wallet | bathing suit |\n",
222
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
223
+ "| tent | multi-tool |\n",
224
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
225
+ "| poncho | dry bag |\n",
226
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
227
+ "| gloves | travel pillow |\n",
228
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
229
+ "| painkiller | base layers |\n",
230
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
231
+ "| pen | day pack |\n",
232
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
233
+ "| thin scarf | entertainment for downtime (e.g. book/ebook, games, laptop, journal) |\n",
234
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
235
+ "| wallet | passport |\n",
236
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
237
+ "| sleeping bag | thin scarf |\n",
238
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
239
+ "| dry bag | comfortable shoes |\n",
240
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
241
+ "| gifts | lightweight backpack |\n",
242
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
243
+ "| fanny pack | beach bag |\n",
244
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
245
+ "| beach towel | swimsuit |\n",
246
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
247
+ "| underwear | short pants/skirts |\n",
248
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
249
+ "| swimsuit | quick-dry towel |\n",
250
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
251
+ "| blanket | sunhat |\n",
252
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
253
+ "| laptop | local currency |\n",
254
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
255
+ "| quick-dry towel | tickets |\n",
256
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
257
+ "| ski jacket | wallet |\n",
258
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
259
+ "| emergency roadside kit | cardigan/sweater |\n",
260
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
261
+ "| passport | refillable water bottle |\n",
262
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
263
+ "| necessary medication | fanny pack |\n",
264
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
265
+ "| snacks for the journey | poncho |\n",
266
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
267
+ "| snow boots | thermal clothing |\n",
268
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
269
+ "| sunglasses | laptop |\n",
270
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
271
+ "| flip-flops | pen |\n",
272
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
273
+ "| phone and charger | big backpack/suitcase |\n",
274
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
275
+ "| socks | beach towel |\n",
276
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
277
+ "| local currency | currency |\n",
278
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
279
+ "| t-shirts/tops | blanket |\n",
280
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
281
+ "| bathing suit | compass |\n",
282
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
283
+ "| currency | beanie |\n",
284
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
285
+ "| cardigan/sweater | sunscreen |\n",
286
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
287
+ "| snowboard | phone and charger |\n",
288
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
289
+ "| visa documents | reusable coffee mug |\n",
290
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
291
+ "| shirts | power bank |\n",
292
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
293
+ "| headphones | personal toiletries (e.g. toothbrush, toothpaste) |\n",
294
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
295
+ "| pants | packable rain jacket |\n",
296
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
297
+ "| refillable water bottle | bandana |\n",
298
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
299
+ "| beach bag | short pants |\n",
300
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
301
+ "| big backpack/suitcase | business attire |\n",
302
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
303
+ "| multi-tool | sleeping bag |\n",
304
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
305
+ "| sandals | flashlight |\n",
306
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
307
+ "| tickets | t-shirts/tops |\n",
308
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
309
+ "| thermal clothing | waterproof phone case |\n",
310
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
311
+ "| short pants/skirts | long pants |\n",
312
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
313
+ "| light jacket | travel-sized toiletries |\n",
314
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
315
+ "| base layers | visa documents |\n",
316
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
317
+ "| snacks | sandals |\n",
318
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
319
+ "| comfortable shoes | hand warmers |\n",
320
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
321
+ "| lightweight backpack | hostel lock |\n",
322
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
323
+ "| winter shoes | headphones |\n",
324
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
325
+ "| dress shoes | emergency roadside kit |\n",
326
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
327
+ "| power bank | ski jacket |\n",
328
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
329
+ "| packable rain jacket | shirts |\n",
330
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
331
+ "| first aid kit | first aid kit |\n",
332
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
333
+ "| water filter | reusable shopping bags |\n",
334
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
335
+ "| short pants | flip-flops |\n",
336
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
337
+ "| ski goggles | camping stove |\n",
338
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
339
+ "| waterproof phone case | water filter |\n",
340
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
341
+ "| sunhat | gloves |\n",
342
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
343
+ "| flashlight | dress shoes |\n",
344
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
345
+ "| earplugs | tent |\n",
346
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
347
+ "| beanie | sunglasses |\n",
348
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
349
+ "| hostel lock | pants |\n",
350
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
351
+ "| personal toiletries (e.g. toothbrush, toothpaste) | charging cables |\n",
352
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
353
+ "| travel journal | snacks |\n",
354
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
355
+ "| reusable coffee mug | neck wallet |\n",
356
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
357
+ "| sunscreen | snacks for the journey |\n",
358
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
359
+ "| travel pillow | ski goggles |\n",
360
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
361
+ "| trekking poles | mosquito repellant |\n",
362
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
363
+ "| business attire | snorkel gear |\n",
364
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
365
+ "| snorkel gear | bug spray |\n",
366
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
367
+ "| reusable shopping bags | earplugs |\n",
368
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
369
+ "| sleep wear | travel insurance documents |\n",
370
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
371
+ "| camping stove | painkiller |\n",
372
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
373
+ "| travel-sized toiletries | hiking boots |\n",
374
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
375
+ "| hiking boots | necessary medication |\n",
376
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
377
+ "| travel insurance documents | socks |\n",
378
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
379
+ "| long pants | underwear |\n",
380
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
381
+ "| charging cables | trekking poles |\n",
382
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
383
+ "| entertainment for downtime (e.g. book/ebook, games, laptop, journal) | sleep wear |\n",
384
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
385
+ "| bug spray | gifts |\n",
386
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
387
+ "| day pack | lip balm |\n",
388
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
389
+ "| travel adapter | snowboard |\n",
390
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
391
+ "| malaria medication | malaria medication |\n",
392
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
393
+ "| compass | snow boots |\n",
394
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n",
395
+ "| mosquito repellant | winter shoes |\n",
396
+ "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n"
397
+ ]
398
+ }
399
+ ],
400
  "source": [
401
  "# Creating a table\n",
402
  "table = zip(result_bart_base[\"labels\"], \n",
space/space/space/space/space/space/space/packing_templates_self_supported_offgrid_expanded.json ADDED
@@ -0,0 +1,696 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "hut trek (summer)": [
3
+ "lichtgewicht trekkingrugzak (30\u201345 liter)",
4
+ "waterzak of drinkflessen",
5
+ "sneldrogende kleding",
6
+ "regenjas",
7
+ "snacks/energierepen",
8
+ "wandelschoenen",
9
+ "hoed of pet",
10
+ "zonnebrandcr\u00e8me",
11
+ "lakenzak",
12
+ "oordoppen"
13
+ ],
14
+ "hut trek (winter)": [
15
+ "slaapzak (lichte, warme variant)",
16
+ "lakenzak",
17
+ "binnenkleding voor hut (thermo / fleece)",
18
+ "hutpantoffels / Crocs",
19
+ "oordoppen",
20
+ "snacks voor onderweg",
21
+ "contant geld voor hutbetalingen"
22
+ ],
23
+ "camping trip (wild camping)": [
24
+ "tent of tarp",
25
+ "slaapmat",
26
+ "slaapzak",
27
+ "hoofdlamp",
28
+ "mes of multitool",
29
+ "waterfilter",
30
+ "kookset + brander",
31
+ "voedsel en snacks",
32
+ "aansteker",
33
+ "toilettas met biologisch afbreekbare zeep"
34
+ ],
35
+ "camping trip (campground)": [
36
+ "tent",
37
+ "kussen",
38
+ "stoeltje",
39
+ "kampeertafel (optioneel)",
40
+ "lamp of lantaarn",
41
+ "verlengsnoer (voor stroomcamping)",
42
+ "koelbox",
43
+ "servies & bestek",
44
+ "afwasspullen",
45
+ "boodschappen"
46
+ ],
47
+ "skitocht / skitour": [
48
+ "touringski's of splitboard",
49
+ "stijgvellen",
50
+ "lawinepieper (transceiver)",
51
+ "schep",
52
+ "sonde",
53
+ "gore-tex jas en broek",
54
+ "donsjas of warme isolatielaag",
55
+ "helm",
56
+ "handschoenen (2 paar)",
57
+ "ski- of zonnebril",
58
+ "thermosfles",
59
+ "sneldrogende baselayers",
60
+ "gamaschen (bij diepe sneeuw)",
61
+ "tourrugzak met ski-bevestiging",
62
+ "laagjes voor temperatuurregeling",
63
+ "energierepen of sportvoeding",
64
+ "GPS of offline kaarten",
65
+ "EHBO-set",
66
+ "lichte handschoenen voor het stijgen"
67
+ ],
68
+ "snowboard / splitboard trip": [
69
+ "splitboard of snowboard",
70
+ "lawinepieper",
71
+ "helm",
72
+ "dikke handschoenen",
73
+ "skibril",
74
+ "laagjes kleding",
75
+ "tourbindingen (voor splitboard)",
76
+ "stijgvellen",
77
+ "sonde en schep",
78
+ "lichte handschoenen voor het stijgen"
79
+ ],
80
+ "long-distance hike / thru-hike": [
81
+ "lichtgewicht rugzak",
82
+ "waterfilter",
83
+ "voedselvoorraad",
84
+ "navigatie (kaart/kompas/GPS)",
85
+ "first aid kit",
86
+ "trail runners of lichte wandelschoenen",
87
+ "zonbescherming",
88
+ "regenbescherming",
89
+ "natte doekjes",
90
+ "sneldrogende handdoek"
91
+ ],
92
+ "digital nomad trip": [
93
+ "laptop en oplader",
94
+ "wereldstekker",
95
+ "noise-cancelling koptelefoon",
96
+ "wifi-hotspot of lokale simkaart",
97
+ "powerbank",
98
+ "notitieboekje",
99
+ "lichte kleding",
100
+ "comfortabele rugzak of trolley",
101
+ "mifi-router of portable wifi-hotspot"
102
+ ],
103
+ "city trip": [
104
+ "dagrugzak",
105
+ "oplader voor telefoon",
106
+ "stadsplattegrond / offline maps",
107
+ "betaalkaart / contant geld",
108
+ "comfortabele schoenen",
109
+ "waterfles",
110
+ "reisgids of highlightslijst",
111
+ "lichte jas of regenjas"
112
+ ],
113
+ "road trip (car/camper)": [
114
+ "autopapieren",
115
+ "EHBO-kit",
116
+ "snacks en drinken",
117
+ "telefoonhouder / navigatie",
118
+ "kampeeruitrusting (indien overnachten)",
119
+ "reisgids of kaarten",
120
+ "zonnebril",
121
+ "stoel en tafel",
122
+ "vermaak (boek, muziek, spelletjes)"
123
+ ],
124
+ "festivaltrip": [
125
+ "tent",
126
+ "matje en slaapzak",
127
+ "oordoppen",
128
+ "poncho of regenjas",
129
+ "glitter / outfit",
130
+ "herbruikbare beker",
131
+ "zonnebrand",
132
+ "cash / pinpas",
133
+ "vriendenafspraken",
134
+ "snacks"
135
+ ],
136
+ "yoga / wellness retreat": [
137
+ "yogamat of yogahanddoek",
138
+ "comfortabele kleding",
139
+ "waterfles",
140
+ "dagboek / pen",
141
+ "warme trui of sjaal",
142
+ "sandalen",
143
+ "etherische olie (optioneel)",
144
+ "boek / meditatiemateriaal"
145
+ ],
146
+ "micro-adventure / weekend trip": [
147
+ "dagrugzak",
148
+ "snacks en drinken",
149
+ "regenjas",
150
+ "warme laag",
151
+ "kaart of GPS",
152
+ "toilettasje",
153
+ "hoofddeksel",
154
+ "compacte slaapuitrusting (indien overnachting)"
155
+ ],
156
+ "beach vacation": [
157
+ "zwemkleding",
158
+ "strandlaken",
159
+ "zonnebrandcr\u00e8me",
160
+ "zonnebril",
161
+ "hoed of pet",
162
+ "slippers",
163
+ "luchtige kleding",
164
+ "waterfles",
165
+ "strandtas",
166
+ "boek of e-reader"
167
+ ],
168
+ "cultural exploration": [
169
+ "dagrugzak",
170
+ "oplader voor telefoon",
171
+ "stadsplattegrond / offline maps",
172
+ "betaalkaart / contant geld",
173
+ "comfortabele schoenen",
174
+ "waterfles",
175
+ "reisgids of highlightslijst",
176
+ "lichte jas of regenjas"
177
+ ],
178
+ "nature escape": [
179
+ "dagrugzak",
180
+ "oplader voor telefoon",
181
+ "betaalkaart / contant geld",
182
+ "comfortabele schoenen",
183
+ "waterfles",
184
+ "downtime entertainment (e.g. book/ebook, games, laptop, journal)"
185
+ ],
186
+ "swimming": [
187
+ "badkleding",
188
+ "handdoek",
189
+ "zonnebrandcr\u00e8me",
190
+ "slippers",
191
+ "zwembril",
192
+ "waterfles"
193
+ ],
194
+ "going to the beach": [
195
+ "strandlaken",
196
+ "zwemkleding",
197
+ "zonnebrand",
198
+ "koeltas",
199
+ "strandstoel",
200
+ "zonnehoed"
201
+ ],
202
+ "relaxing": [
203
+ "comfortabele kleding",
204
+ "boek of e-reader",
205
+ "zitkussen of strandmat",
206
+ "muziek / koptelefoon"
207
+ ],
208
+ "sightseeing": [
209
+ "dagrugzak",
210
+ "waterfles",
211
+ "camera of smartphone",
212
+ "kaart of offline maps",
213
+ "zonnebril"
214
+ ],
215
+ "biking": [
216
+ "fiets of huurfiets",
217
+ "helm",
218
+ "sportkleding",
219
+ "fietslicht en slot",
220
+ "reparatieset"
221
+ ],
222
+ "running": [
223
+ "hardloopschoenen",
224
+ "sportkleding",
225
+ "zweetband of pet",
226
+ "waterfles of belt",
227
+ "sporthorloge (optioneel)"
228
+ ],
229
+ "skiing": [
230
+ "ski\u2019s en stokken",
231
+ "skischoenen",
232
+ "helm",
233
+ "skibril",
234
+ "skipas",
235
+ "dikke handschoenen",
236
+ "gore-tex kleding"
237
+ ],
238
+ "cross-country skiing": [
239
+ "langlaufski's en stokken",
240
+ "langlaufschoenen",
241
+ "ademende thermokleding",
242
+ "winddichte en waterafstotende buitenlaag",
243
+ "dunne handschoenen",
244
+ "muts of hoofdband",
245
+ "zonnebril of sportbril",
246
+ "buff of nekwarmer",
247
+ "lichte rugzak met water en snacks",
248
+ "EHBO-set"
249
+ ],
250
+ "ski touring": [
251
+ "algemene items voor deze situatie",
252
+ "extra kleding of uitrusting indien nodig"
253
+ ],
254
+ "hiking": [
255
+ "wandelschoenen of trailrunners",
256
+ "hiking sokken (anti-blaren)",
257
+ "hikingstokken",
258
+ "dagrugzak",
259
+ "regenjas of poncho",
260
+ "waterfles of waterzak",
261
+ "snacks / energierepen",
262
+ "zonnebrand en zonnebril",
263
+ "pet of hoed",
264
+ "blarenpleisters of tape",
265
+ "EHBO-set",
266
+ "navigatie (kaart, kompas of GPS)"
267
+ ],
268
+ "hut-to-hut hiking": [
269
+ "lichtgewicht trekkingrugzak (30\u201345 liter)",
270
+ "waterflessen of waterzak",
271
+ "snacks voor onderweg",
272
+ "regenjas",
273
+ "licht donsjack of warme laag",
274
+ "sneldrogende kleding",
275
+ "wandelschoenen",
276
+ "lakenzak",
277
+ "pantoffels of slippers voor hut",
278
+ "oordoppen",
279
+ "reserveringsbevestiging / contant geld voor hut"
280
+ ],
281
+ "rock climbing": [
282
+ "klimschoenen",
283
+ "klimgordel",
284
+ "zekeringsapparaat",
285
+ "chalk bag",
286
+ "helm"
287
+ ],
288
+ "ice climbing": [
289
+ "ijsbijlen",
290
+ "crampons",
291
+ "klimtouw",
292
+ "warme isolatielagen",
293
+ "klimgordel en helm"
294
+ ],
295
+ "snowshoe hiking": [
296
+ "algemene items voor deze situatie",
297
+ "extra kleding of uitrusting indien nodig"
298
+ ],
299
+ "kayaking / canoeing": [
300
+ "kayak of kano",
301
+ "peddel",
302
+ "reddingsvest",
303
+ "drybag",
304
+ "waterschoenen"
305
+ ],
306
+ "stand-up paddleboarding (SUP)": [
307
+ "SUP-board en peddel",
308
+ "reddingsvest",
309
+ "drybag",
310
+ "zonnebril met koord"
311
+ ],
312
+ "snorkeling": [
313
+ "snorkel en duikbril",
314
+ "zwemvliezen",
315
+ "badkleding",
316
+ "waterdichte tas"
317
+ ],
318
+ "scuba diving": [
319
+ "duikbril en snorkel",
320
+ "wetsuit",
321
+ "vinnen",
322
+ "duikcomputer",
323
+ "ademautomaat (indien eigen)"
324
+ ],
325
+ "surfing": [
326
+ "surfboard",
327
+ "wetsuit",
328
+ "wax",
329
+ "board leash",
330
+ "poncho of handdoek"
331
+ ],
332
+ "paragliding": [
333
+ "paraglider",
334
+ "helm",
335
+ "handschoenen",
336
+ "wandelschoenen",
337
+ "windjack"
338
+ ],
339
+ "horseback riding": [
340
+ "rijbroek",
341
+ "rijlaarzen of schoenen met hak",
342
+ "handschoenen",
343
+ "helm",
344
+ "eventueel eigen zadel of stijgbeugels"
345
+ ],
346
+ "photography": [
347
+ "camera + lenzen",
348
+ "statief",
349
+ "geheugenkaart(en)",
350
+ "extra accu's",
351
+ "lensdoekje"
352
+ ],
353
+ "fishing": [
354
+ "hengel",
355
+ "aas / kunstaas",
356
+ "visvergunning",
357
+ "laarzen of waadpak",
358
+ "koelbox (optioneel)"
359
+ ],
360
+ "rafting": [
361
+ "reddingsvest",
362
+ "waterdichte tas",
363
+ "waterschoenen",
364
+ "sneldrogende kleding",
365
+ "helm"
366
+ ],
367
+ "yoga": [
368
+ "algemene items voor deze situatie",
369
+ "extra kleding of uitrusting indien nodig"
370
+ ],
371
+ "cold destination / winter": [
372
+ "thermokleding",
373
+ "muts en handschoenen",
374
+ "warme jas",
375
+ "waterdichte schoenen",
376
+ "lippenbalsem"
377
+ ],
378
+ "warm destination / summer": [
379
+ "luchtige kleding",
380
+ "zonnebril",
381
+ "zonnecr\u00e8me",
382
+ "waterfles",
383
+ "hoed of pet"
384
+ ],
385
+ "variable weather / spring / autumn": [
386
+ "regenjas",
387
+ "laagjeskleding",
388
+ "warme trui",
389
+ "waterdichte schoenen"
390
+ ],
391
+ "tropical / humid": [
392
+ "lichtgewicht kleding",
393
+ "insectenspray",
394
+ "zonnecr\u00e8me",
395
+ "regenponcho",
396
+ "sneldrogende handdoek"
397
+ ],
398
+ "dry / desert-like": [
399
+ "zonnebril",
400
+ "zonnecr\u00e8me",
401
+ "hoofdbescherming",
402
+ "lichte lange mouwen",
403
+ "veel water"
404
+ ],
405
+ "ultralight": [
406
+ "lichtgewicht rugzak (< 1kg)",
407
+ "minimalistische shelter (tarp of tent)",
408
+ "lichtgewicht slaapmat",
409
+ "quilt of donsdeken",
410
+ "titanium kookset",
411
+ "beperkte kleding (laagjes!)",
412
+ "compacte regenjas",
413
+ "sneldrogende handdoek",
414
+ "tandenborstel (afgezaagd ;))"
415
+ ],
416
+ "lightweight (but comfortable)": [
417
+ "lichte tent of tarp",
418
+ "comfortabele slaapmat",
419
+ "lichtgewicht kookset",
420
+ "compact kledingpakket",
421
+ "sneldrogende handdoek"
422
+ ],
423
+ "luxury (including evening wear)": [
424
+ "nette schoenen",
425
+ "jurk of overhemd",
426
+ "parfum/deodorant",
427
+ "accessoires",
428
+ "reistoilettas met essentials"
429
+ ],
430
+ "minimalist": [
431
+ "1 set kleding voor elke situatie",
432
+ "compacte tandenborstel",
433
+ "alles-in-\u00e9\u00e9n zeep",
434
+ "lichtgewicht handdoek",
435
+ "kleine rugzak"
436
+ ],
437
+ "casual": [
438
+ "jeans of comfortabele broek",
439
+ "t-shirts",
440
+ "trui of hoodie",
441
+ "sneakers",
442
+ "zonnebril"
443
+ ],
444
+ "formal (business trip)": [
445
+ "overhemd / blouse",
446
+ "net jasje",
447
+ "nette schoenen",
448
+ "laptop en oplader",
449
+ "naamkaartjes / documenten"
450
+ ],
451
+ "conservative": [
452
+ "bedekkende kleding",
453
+ "sjaal of omslagdoek",
454
+ "lange broek of rok",
455
+ "gesloten schoenen"
456
+ ],
457
+ "outdoor": [
458
+ "regenjas",
459
+ "waterdichte schoenen",
460
+ "bivakzak of tarp",
461
+ "hoofdlamp",
462
+ "navigatie"
463
+ ],
464
+ "indoor": [
465
+ "pantoffels",
466
+ "comfortabele kleding",
467
+ "pyjama",
468
+ "oplader",
469
+ "toilettas"
470
+ ],
471
+ "huts with half board": [
472
+ "reserveringsbevestiging",
473
+ "lakenzak (vaak verplicht)",
474
+ "pantoffels of slippers voor binnen",
475
+ "lichte pyjama of slaapkleding",
476
+ "oorstopjes",
477
+ "waterfles",
478
+ "snacks voor onderweg",
479
+ "kleine toilettas",
480
+ "contant geld voor betalingen",
481
+ "lichte handdoek",
482
+ "zaklamp of hoofdlamp"
483
+ ],
484
+ "sleeping in a tent": [
485
+ "tent (1- of 2-persoons, afhankelijk van reis)",
486
+ "grondzeil",
487
+ "slaapmat",
488
+ "slaapzak (passend bij temperatuur)",
489
+ "hoofdlamp of zaklamp",
490
+ "sneldrogende handdoek",
491
+ "kussen of opblaasbaar hoofdkussen",
492
+ "oorstopjes",
493
+ "toilettas",
494
+ "powerbank",
495
+ "waterfles",
496
+ "boek of e-reader",
497
+ "EHBO-set"
498
+ ],
499
+ "sleeping in a car": [
500
+ "slaapmat die past in auto",
501
+ "warme slaapzak",
502
+ "zonneschermen of verduistering",
503
+ "kussen",
504
+ "waterfles binnen handbereik",
505
+ "powerbank of 12V-lader",
506
+ "toilettas",
507
+ "snacks voor de nacht",
508
+ "thermische deken (voor koude nachten)"
509
+ ],
510
+ "vehicle": [
511
+ "rijbewijs",
512
+ "autopapieren",
513
+ "EHBO-set",
514
+ "navigatie of smartphone",
515
+ "telefoonhouder"
516
+ ],
517
+ "no vehicle": [
518
+ "rugzak",
519
+ "waterfles",
520
+ "lichte schoenen",
521
+ "openbaar vervoer app of ticket"
522
+ ],
523
+ "off-grid / no electricity": [
524
+ "powerbank (minstens 10.000 mAh)",
525
+ "zonnepaneel of draagbaar laadsysteem",
526
+ "hoofdlamp + extra batterijen",
527
+ "oplaadbare batterijen en oplader",
528
+ "back-up verlichting (bijv. kleine zaklamp)",
529
+ "papieren kaart en kompas",
530
+ "notitieboekje + pen",
531
+ "noodcommunicatie (bijv. GPS beacon of satellietboodschapper)",
532
+ "opvouwbaar zonnepaneel (indien langere tochten)",
533
+ "navigatieapparaat met offline kaarten",
534
+ "extra opladerkabels",
535
+ "USB-hub (voor meerdere devices)",
536
+ "verpakking om elektronica droog te houden"
537
+ ],
538
+ "self-supported (bring your own food/cooking)": [
539
+ "lichtgewicht kooktoestel (gas, benzine of alcohol)",
540
+ "brandstof (voldoende voor aantal dagen)",
541
+ "pan of keteltje",
542
+ "spork of lepel",
543
+ "opvouwbaar snijplankje (optioneel)",
544
+ "aandrukbare kom of beker",
545
+ "aansteker + lucifers (waterdicht verpakt)",
546
+ "gedroogde of gevriesdroogde maaltijden",
547
+ "snacks / noodbars",
548
+ "afwasmiddel (biologisch afbreekbaar) + sponsje",
549
+ "opslagzakken voor afval",
550
+ "waterfilter of -pomp",
551
+ "chloordruppels of zuiveringstabletten",
552
+ "minstens 2 liter wateropslag per persoon",
553
+ "food bag of hangzak voor voedsel (wild-safe)"
554
+ ],
555
+ "child-friendly": [
556
+ "snacks en speelgoed",
557
+ "EHBO-set met pleisters",
558
+ "extra kleding",
559
+ "kinderdrager of buggy",
560
+ "zonbescherming",
561
+ "extra snacks voor kinderen",
562
+ "favoriet speelgoed of knuffel",
563
+ "reisstoel of draagzak",
564
+ "natte doekjes",
565
+ "luiers of potje (afhankelijk van leeftijd)",
566
+ "extra set kleding per dag",
567
+ "reisapotheek voor kinderen",
568
+ "kinderzonnebrand",
569
+ "activiteitenboekje of tablet met filmpjes",
570
+ "snelle snacks voor onderweg",
571
+ "kinder slaapzak of slaapmat",
572
+ "reisbedje of matrasje (voor jonge kinderen)",
573
+ "luiers of oefenbroekjes",
574
+ "flesjes en voeding (indien van toepassing)",
575
+ "babyfoon (voor verblijf op locatie)",
576
+ "speen of fopspeen",
577
+ "kinder EHBO-set (inclusief thermometer)",
578
+ "zonnehoedje of muts",
579
+ "regenhoes voor kinderwagen of draagzak",
580
+ "hydraterende cr\u00e8me (voor gevoelige huid)"
581
+ ],
582
+ "pet-friendly": [
583
+ "voer en waterbak",
584
+ "hondenriem of tuigje",
585
+ "poepzakjes",
586
+ "reismatje of deken",
587
+ "vaccinatieboekje"
588
+ ],
589
+ "rainy climate": [
590
+ "regenjas of poncho",
591
+ "waterdichte rugzakhoes",
592
+ "sneldrogende kleding",
593
+ "rubberen schoenen"
594
+ ],
595
+ "snow and ice": [
596
+ "warme laarzen",
597
+ "dikke handschoenen",
598
+ "sneeuwbril",
599
+ "thermokleding",
600
+ "gripzolen / spikes"
601
+ ],
602
+ "high alpine terrain": [
603
+ "wandelstokken",
604
+ "kaart en kompas",
605
+ "wind- en waterdichte jas",
606
+ "extra voeding",
607
+ "EHBO-kit"
608
+ ],
609
+ "avalanche-prone terrain": [
610
+ "lawinepieper",
611
+ "schep",
612
+ "sonde",
613
+ "airbag rugzak (indien beschikbaar)",
614
+ "kennis van lawineveiligheid / cursus",
615
+ "kaart en kompas / GPS",
616
+ "partner check voor vertrek"
617
+ ],
618
+ "no special conditions": [
619
+ ],
620
+ "1 day": [
621
+ "ondergoed per dag",
622
+ "sokken per dag",
623
+ "toilettas",
624
+ "extra kledinglaag",
625
+ "aantal maaltijden/snacks afgestemd op duur"
626
+ ],
627
+ "2 days": [
628
+ "ondergoed per dag",
629
+ "sokken per dag",
630
+ "toilettas",
631
+ "extra kledinglaag",
632
+ "aantal maaltijden/snacks afgestemd op duur"
633
+ ],
634
+ "3 days": [
635
+ "ondergoed per dag",
636
+ "sokken per dag",
637
+ "toilettas",
638
+ "extra kledinglaag",
639
+ "aantal maaltijden/snacks afgestemd op duur"
640
+ ],
641
+ "4 days": [
642
+ "ondergoed per dag",
643
+ "sokken per dag",
644
+ "toilettas",
645
+ "extra kledinglaag",
646
+ "aantal maaltijden/snacks afgestemd op duur"
647
+ ],
648
+ "5 days": [
649
+ "ondergoed per dag",
650
+ "sokken per dag",
651
+ "toilettas",
652
+ "extra kledinglaag",
653
+ "aantal maaltijden/snacks afgestemd op duur"
654
+ ],
655
+ "6 days": [
656
+ "ondergoed per dag",
657
+ "sokken per dag",
658
+ "toilettas",
659
+ "extra kledinglaag",
660
+ "aantal maaltijden/snacks afgestemd op duur"
661
+ ],
662
+ "7 days": [
663
+ "ondergoed per dag",
664
+ "sokken per dag",
665
+ "toilettas",
666
+ "extra kledinglaag",
667
+ "aantal maaltijden/snacks afgestemd op duur"
668
+ ],
669
+ "7+ days": [
670
+ "ondergoed per dag",
671
+ "sokken per dag",
672
+ "toilettas",
673
+ "extra kledinglaag",
674
+ "aantal maaltijden/snacks afgestemd op duur"
675
+ ],
676
+ "default_items": [
677
+ "telefoon + oplader",
678
+ "powerbank",
679
+ "identiteitsbewijs of paspoort",
680
+ "betaalmiddelen (pinpas / contant geld)",
681
+ "verzekeringspas / reisverzekering-info",
682
+ "toilettas (tandpasta, borstel, deodorant, zeep)",
683
+ "zonnebrandcr\u00e8me",
684
+ "lippenbalsem",
685
+ "ondergoed",
686
+ "sokken",
687
+ "regenjas of windjack",
688
+ "waterfles",
689
+ "snacks",
690
+ "zonnebril",
691
+ "oorstopjes",
692
+ "slaapmasker",
693
+ "herbruikbare tas",
694
+ "zakdoekjes of toiletpapier"
695
+ ]
696
+ }
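Editor's note: the template file above maps each label to a list of packing items and also provides a shared "default_items" list. A minimal sketch of turning predicted labels into a packing list follows (not part of the commit; the file name, the example predicted labels, and the merge logic are assumptions for illustration):

# Illustrative sketch: build a packing list from predicted labels plus default items.
import json

with open("packing_templates_self_supported_offgrid_expanded.json", "r", encoding="utf-8") as f:
    templates = json.load(f)

# Example labels, one per category, as a classifier might predict them.
predicted_labels = ["beach vacation", "swimming", "warm destination / summer", "7 days"]

packing_list = set(templates.get("default_items", []))
for label in predicted_labels:
    packing_list.update(templates.get(label, []))

for item in sorted(packing_list):
    print(item)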
space/space/space/space/space/space/space/space/labels.txt CHANGED
@@ -1,89 +1,93 @@
1
- Swimsuit
2
- Sunscreen
3
- Flip-flops
4
- Beach towel
5
- Sunglasses
6
- Waterproof phone case
7
- Hat
8
- Beach bag
9
- Snorkel gear
10
- Aloe vera gel
11
- Tent
12
- Sleeping bag
13
- Camping stove
14
- Flashlight
15
- Hiking boots
16
- Water filter
17
- Compass
18
- First aid kit
19
- Bug spray
20
- Multi-tool
21
- Thermal clothing
22
- Ski jacket
23
- Ski goggles
24
- Snow boots
25
- Gloves
26
- Hand warmers
27
- Beanie
28
- Lip balm
29
- Snowboard
30
- Base layers
31
- Passport
32
- Visa documents
33
- Travel adapter
34
- Currency
35
- Language phrasebook
36
- SIM card
37
- Travel pillow
38
- Neck wallet
39
- Travel insurance documents
40
- Power bank
41
- Laptop
42
- Notebook
43
- Business attire
44
- Dress shoes
45
- Charging cables
46
- Presentation materials
47
- Work ID badge
48
- Pen
49
- Headphones
50
- Lightweight backpack
51
- Travel-sized toiletries
52
- Packable rain jacket
53
- Reusable water bottle
54
- Dry bag
55
- Trekking poles
56
- Hostel lock
57
- Quick-dry towel
58
- Travel journal
59
- Energy bars
60
- Car charger
61
- Snacks
62
- Map
63
- Sunglasses
64
- Cooler
65
- Blanket
66
- Emergency roadside kit
67
- Reusable coffee mug
68
- Playlist
69
- Reusable shopping bags
70
- Earplugs
71
- Fanny pack
72
- Portable charger
73
- Poncho
74
- Bandana
75
- Comfortable shoes
76
- Tent
77
- Refillable water bottle
78
- Glow sticks
79
- Festival tickets
80
- Diapers
81
- Baby wipes
82
- Baby food
83
- Stroller
84
- Pacifier
85
- Baby clothes
86
- Baby blanket
87
- Travel crib
88
- Toys
89
- Nursing cover
 
 
 
 
 
1
+ sunscreen
2
+ sunglasses
3
+ sunhat
4
+ flip-flops
5
+ swimsuit
6
+ beach towel
7
+ waterproof phone case
8
+ beach bag
9
+ snorkel gear
10
+ tent
11
+ sleeping bag
12
+ camping stove
13
+ flashlight
14
+ hiking boots
15
+ water filter
16
+ compass
17
+ first aid kit
18
+ bug spray
19
+ multi-tool
20
+ thermal clothing
21
+ ski jacket
22
+ ski goggles
23
+ snow boots
24
+ gloves
25
+ hand warmers
26
+ beanie
27
+ lip balm
28
+ snowboard
29
+ base layers
30
+ passport
31
+ visa documents
32
+ travel adapter
33
+ currency
34
+ sim card
35
+ travel pillow
36
+ neck wallet
37
+ travel insurance documents
38
+ power bank
39
+ laptop
40
+ business attire
41
+ dress shoes
42
+ charging cables
43
+ pen
44
+ headphones
45
+ lightweight backpack
46
+ travel-sized toiletries
47
+ packable rain jacket
48
+ dry bag
49
+ trekking poles
50
+ hostel lock
51
+ quick-dry towel
52
+ travel journal
53
+ snacks
54
+ blanket
55
+ emergency roadside kit
56
+ reusable coffee mug
57
+ reusable shopping bags
58
+ earplugs
59
+ fanny pack
60
+ poncho
61
+ bandana
62
+ comfortable shoes
63
+ bathing suit
64
+ sandals
65
+ light jacket
66
+ entertainment for downtime (e.g. book/ebook, games, laptop, journal)
67
+ short pants/skirts
68
+ t-shirts/tops
69
+ thin scarf
70
+ pants
71
+ shirts
72
+ cardigan/sweater
73
+ gifts
74
+ winter shoes
75
+ long pants
76
+ short pants
77
+ malaria medication
78
+ mosquito repellant
79
+ local currency
80
+ wallet
81
+ tickets
82
+ phone and charger
83
+ painkiller
84
+ necessary medication
85
+ personal toiletries (e.g. toothbrush, toothpaste)
86
+ underwear
87
+ socks
88
+ sleep wear
89
+ snacks for the journey
90
+ refillable water bottle
91
+ day pack
92
+ big backpack/suitcase
93
+
space/space/space/space/space/space/space/space/packing_list_api.ipynb CHANGED
@@ -124,7 +124,58 @@
124
  },
125
  {
126
  "cell_type": "code",
127
- "execution_count": 5,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
  "id": "d0d8f7c0-c2d9-4fbe-b1a7-699a5b99466c",
129
  "metadata": {},
130
  "outputs": [
@@ -140,82 +191,89 @@
140
  }
141
  ],
142
  "source": [
143
- "from transformers import pipeline\n",
144
- "\n",
145
- "# Load the model and create a pipeline for zero-shot classification\n",
146
- "classifier = pipeline(\"zero-shot-classification\", model=\"facebook/bart-base\")"
147
  ]
148
  },
149
  {
150
  "cell_type": "code",
151
- "execution_count": 6,
152
- "id": "4682d620-c9a6-40ad-ab4c-268ee0ef7212",
153
  "metadata": {},
154
  "outputs": [
155
  {
156
  "name": "stderr",
157
  "output_type": "stream",
158
  "text": [
159
- "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
160
  ]
161
- },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
  {
163
- "name": "stdout",
164
  "output_type": "stream",
165
  "text": [
166
- "{'sequence': 'I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.', 'labels': ['Swimsuit', 'Travel crib', 'Business attire', 'Toys', 'Notebook', 'Travel adapter', 'Compass', 'Travel pillow', 'Headphones', 'Travel journal', 'Playlist', 'Flip-flops', 'Hiking boots', 'Reusable coffee mug', 'Comfortable shoes', 'Nursing cover', 'Gloves', 'Tent', 'Tent', 'Sunglasses', 'Sunglasses', 'Charging cables', 'Travel-sized toiletries', 'Refillable water bottle', 'Energy bars', 'Dress shoes', 'Festival tickets', 'Lightweight backpack', 'Packable rain jacket', 'Flashlight', 'Hostel lock', 'Presentation materials', 'Thermal clothing', 'Snowboard', 'Camping stove', 'Reusable shopping bags', 'Reusable water bottle', 'Blanket', 'Diapers', 'Snorkel gear', 'Snacks', 'Emergency roadside kit', 'Beach towel', 'Sunscreen', 'Car charger', 'Bug spray', 'Passport', 'Currency', 'Beach bag', 'Ski jacket', 'First aid kit', 'Cooler', 'Quick-dry towel', 'Laptop', 'Aloe vera gel', 'Earplugs', 'Baby wipes', 'Ski goggles', 'Travel insurance documents', 'Portable charger', 'Beanie', 'Bandana', 'Multi-tool', 'Pacifier', 'Stroller', 'Language phrasebook', 'Waterproof phone case', 'Dry bag', 'Map', 'Lip balm', 'Fanny pack', 'Trekking poles', 'Power bank', 'Baby clothes', 'Baby food', 'Poncho', 'Sleeping bag', 'Work ID badge', 'Visa documents', 'SIM card', 'Water filter', 'Snow boots', 'Hand warmers', 'Baby blanket', 'Base layers', 'Pen', 'Hat', 'Neck wallet', 'Glow sticks'], 'scores': [0.012542711570858955, 0.012216676957905293, 0.012068654410541058, 0.011977529153227806, 0.011932261288166046, 0.011920000426471233, 0.011883101426064968, 0.011842883192002773, 0.011819617822766304, 0.011810989119112492, 0.011761271394789219, 0.011756575666368008, 0.011726364493370056, 0.011664840392768383, 0.011632450856268406, 0.01163020171225071, 0.01158054918050766, 0.011572858318686485, 0.011572858318686485, 0.011541635729372501, 0.011541635729372501, 0.011517350561916828, 0.011510960757732391, 0.011489875614643097, 0.011469963937997818, 0.011466587893664837, 0.011442759074270725, 0.011438597925007343, 0.011437375098466873, 0.011433145962655544, 0.011407203041017056, 0.011401104740798473, 0.01135423593223095, 0.011333385482430458, 0.011328010819852352, 0.011325137689709663, 0.01131997536867857, 0.011306566186249256, 0.011299673467874527, 0.011281789280474186, 0.011264320462942123, 0.011257764883339405, 0.011256475001573563, 0.011253912933170795, 0.011252702213823795, 0.011248898692429066, 0.011247594840824604, 0.011239985004067421, 0.01121864840388298, 0.011208567768335342, 0.011174682527780533, 0.011166973039507866, 0.011159253306686878, 0.011151333339512348, 0.011140624061226845, 0.011139076203107834, 0.01113345380872488, 0.011126152239739895, 0.011093570850789547, 0.011078842915594578, 0.011067545972764492, 0.011044573038816452, 0.01101986039429903, 0.011016158387064934, 0.011015082709491253, 0.011007890105247498, 0.010997296310961246, 0.010962157510221004, 0.01095755398273468, 0.010940180160105228, 0.01088095735758543, 0.010869039222598076, 0.010858545079827309, 0.010820968076586723, 0.01080892514437437, 0.010798529721796513, 0.01077410951256752, 0.010764310136437416, 0.010748079977929592, 0.010681436397135258, 0.010675576515495777, 0.010557047091424465, 0.010552684776484966, 0.010509641841053963, 0.010396942496299744, 0.01037551462650299, 0.01033466774970293, 0.010237698443233967, 0.009954877197742462]}\n"
167
  ]
168
  }
169
  ],
170
  "source": [
171
- "input_text = \"I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.\"\n",
172
- "\n",
173
- "# Candidate labels\n",
174
- "candidate_labels = [\n",
175
- " \"Swimsuit\", \"Sunscreen\", \"Flip-flops\", \"Beach towel\", \"Sunglasses\", \n",
176
- " \"Waterproof phone case\", \"Hat\", \"Beach bag\", \"Snorkel gear\", \"Aloe vera gel\",\n",
177
- " \"Tent\", \"Sleeping bag\", \"Camping stove\", \"Flashlight\", \"Hiking boots\",\n",
178
- " \"Water filter\", \"Compass\", \"First aid kit\", \"Bug spray\", \"Multi-tool\",\n",
179
- " \"Thermal clothing\", \"Ski jacket\", \"Ski goggles\", \"Snow boots\", \"Gloves\",\n",
180
- " \"Hand warmers\", \"Beanie\", \"Lip balm\", \"Snowboard\", \"Base layers\",\n",
181
- " \"Passport\", \"Visa documents\", \"Travel adapter\", \"Currency\", \"Language phrasebook\",\n",
182
- " \"SIM card\", \"Travel pillow\", \"Neck wallet\", \"Travel insurance documents\", \"Power bank\",\n",
183
- " \"Laptop\", \"Notebook\", \"Business attire\", \"Dress shoes\", \"Charging cables\",\n",
184
- " \"Presentation materials\", \"Work ID badge\", \"Pen\", \"Headphones\", \n",
185
- " \"Lightweight backpack\", \"Travel-sized toiletries\", \"Packable rain jacket\",\n",
186
- " \"Reusable water bottle\", \"Dry bag\", \"Trekking poles\", \"Hostel lock\", \"Quick-dry towel\",\n",
187
- " \"Travel journal\", \"Energy bars\", \"Car charger\", \"Snacks\", \"Map\",\n",
188
- " \"Sunglasses\", \"Cooler\", \"Blanket\", \"Emergency roadside kit\", \"Reusable coffee mug\",\n",
189
- " \"Playlist\", \"Reusable shopping bags\", \"Earplugs\", \"Fanny pack\", \"Portable charger\",\n",
190
- " \"Poncho\", \"Bandana\", \"Comfortable shoes\", \"Tent\", \"Refillable water bottle\",\n",
191
- " \"Glow sticks\", \"Festival tickets\", \"Diapers\", \"Baby wipes\", \"Baby food\",\n",
192
- " \"Stroller\", \"Pacifier\", \"Baby clothes\", \"Baby blanket\", \"Travel crib\",\n",
193
- " \"Toys\", \"Nursing cover\"\n",
194
- "]\n",
195
- "\n",
196
- "\n",
197
- "# Run the classification\n",
198
- "result = classifier(input_text, candidate_labels)\n",
199
- "\n",
200
- "# Print the result\n",
201
- "print(result)"
202
  ]
203
  },
204
  {
205
  "cell_type": "code",
206
  "execution_count": null,
207
- "id": "a344a80f-7645-4c2c-b960-580aa0b345f6",
208
  "metadata": {},
209
  "outputs": [],
210
- "source": []
 
 
 
211
  },
212
  {
213
  "cell_type": "code",
214
  "execution_count": null,
215
- "id": "5eb705d6-c31c-406c-9739-ff45b66c7ca4",
216
  "metadata": {},
217
  "outputs": [],
218
- "source": []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
  },
220
  {
221
  "cell_type": "code",
@@ -224,21 +282,17 @@
224
  "metadata": {},
225
  "outputs": [],
226
  "source": [
227
- "# Example text to classify\n",
228
- "text = \"I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.\"\n",
229
- "\n",
230
  "# No prompt\n",
231
- "no_prompt = text\n",
232
- "no_result = classifier(no_prompt, candidate_labels)\n",
233
- "\n",
234
  "\n",
235
  "# Simple prompt\n",
236
- "simple_prompt = \"Classify the following text: \" + text\n",
237
- "simple_result = classifier(simple_prompt, candidate_labels)\n",
238
  "\n",
239
  "# Primed prompt\n",
240
- "primed_prompt = \"I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july. What are the most important things to pack for the trip?\"\n",
241
- "primed_result = classifier(primed_prompt, candidate_labels)"
242
  ]
243
  },
244
  {
@@ -436,9 +490,6 @@
436
  }
437
  ],
438
  "source": [
439
- "from tabulate import tabulate\n",
440
- "\n",
441
- "\n",
442
  "# Creating a table\n",
443
  "table = zip(no_result[\"labels\"], no_result[\"scores\"], \n",
444
  " simple_result[\"labels\"], simple_result[\"scores\"], \n",
@@ -447,14 +498,6 @@
447
  "\n",
448
  "print(tabulate(table, headers=headers, tablefmt=\"grid\"))\n"
449
  ]
450
- },
451
- {
452
- "cell_type": "code",
453
- "execution_count": null,
454
- "id": "5ed9bda0-41f2-4c7c-b055-27c1998c1d4e",
455
- "metadata": {},
456
- "outputs": [],
457
- "source": []
458
  }
459
  ],
460
  "metadata": {
 
124
  },
125
  {
126
  "cell_type": "code",
127
+ "execution_count": 35,
128
+ "id": "1d01a363-572b-450c-8fce-0721234f9a1a",
129
+ "metadata": {},
130
+ "outputs": [
131
+ {
132
+ "name": "stdout",
133
+ "output_type": "stream",
134
+ "text": [
135
+ "First trip: 7-Day Island Beach Holiday in Greece (Summer). I am planning a trip to Greece with my boyfriend, where we will visit two islands. We have booked an apartment on each island for a few days and plan to spend most of our time relaxing. Our main goals are to enjoy the beach, try delicious local food, and possibly go on a hike—if it’s not too hot. We will be relying solely on public transport. We’re in our late 20s and traveling from the Netherlands. \n",
136
+ "\n",
137
+ "Packing list: ['bathing suit', 'beach towel', 'beach bag', 'sandals', 'comfortable walking shoes', 'light jacket', 'sunscreen', 'sunglasses', 'sunhat', 'entertainment for downtime (e.g. book/ebook, games, laptop, journal)', 'short pants/skirts', 't-shirts/tops']\n"
138
+ ]
139
+ }
140
+ ],
141
+ "source": [
142
+ "# Prerequisites\n",
143
+ "from tabulate import tabulate\n",
144
+ "from transformers import pipeline\n",
145
+ "import json\n",
146
+ "\n",
147
+ "# input text\n",
148
+ "input_text = \"I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.\"\n",
149
+ "\n",
150
+ "# Load labels from a txt file\n",
151
+ "with open(\"labels.txt\", \"r\", encoding=\"utf-8\") as f:\n",
152
+ " class_labels = [line.strip() for line in f if line.strip()]\n",
153
+ "\n",
154
+ "# Load test data (in dictionary)\n",
155
+ "with open(\"test_data.json\", \"r\") as file:\n",
156
+ " packing_data = json.load(file)\n",
157
+ "# Get a list of trip descriptions (keys)\n",
158
+ "trips = list(packing_data.keys())\n",
159
+ "# Access the first trip description\n",
160
+ "first_trip = trips[0]\n",
161
+ "# Get the packing list for the second trip\n",
162
+ "first_trip_items = packing_data[first_trip]\n",
163
+ "\n",
164
+ "print(f\"First trip: {first_trip} \\n\")\n",
165
+ "print(f\"Packing list: {first_trip_items}\")"
166
+ ]
167
+ },
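The cell above only inspects the first trip. For reference, a minimal sketch (hypothetical, reusing the same test_data.json file) of walking every trip description together with its reference packing list, which is what a batch evaluation over the whole test set would need:

import json

# Walk every trip in test_data.json, not just the first one,
# and show how many reference items each trip has.
with open("test_data.json", "r") as file:
    packing_data = json.load(file)

for description, reference_items in packing_data.items():
    print(f"Trip: {description[:60]}...")
    print(f"  {len(reference_items)} reference items, e.g. {reference_items[:3]}")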
168
+ {
169
+ "cell_type": "markdown",
170
+ "id": "88aa1d7e-8a32-4530-9ddd-60fa38e4a342",
171
+ "metadata": {},
172
+ "source": [
173
+ "Load classifiers"
174
+ ]
175
+ },
176
+ {
177
+ "cell_type": "code",
178
+ "execution_count": 36,
179
  "id": "d0d8f7c0-c2d9-4fbe-b1a7-699a5b99466c",
180
  "metadata": {},
181
  "outputs": [
 
191
  }
192
  ],
193
  "source": [
194
+ "# Load smaller the model and create a pipeline for zero-shot classification (1min loading + classifying with 89 labels)\n",
195
+ "classifier_bart_base = pipeline(\"zero-shot-classification\", model=\"facebook/bart-base\")"
 
 
196
  ]
197
  },
198
  {
199
  "cell_type": "code",
200
+ "execution_count": 37,
201
+ "id": "a971ca1c-d478-489f-9592-bc243d587eb4",
202
  "metadata": {},
203
  "outputs": [
204
  {
205
  "name": "stderr",
206
  "output_type": "stream",
207
  "text": [
208
+ "Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.\n"
209
  ]
210
+ }
211
+ ],
212
+ "source": [
213
+ "# Load larger the model and create a pipeline for zero-shot classification (5min loading model + classifying with 89 labels)\n",
214
+ "classifier_bart_large_mnli = pipeline(\"zero-shot-classification\", model=\"facebook/bart-large-mnli\")"
215
+ ]
216
+ },
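The stderr output above warns that a hardware accelerator is available but no `device` argument was passed, so the model stays on CPU. A minimal sketch of pinning the pipeline to a device explicitly (the `device` parameter of transformers pipelines takes 0 for the first GPU/accelerator and -1 for CPU):

from transformers import pipeline

# Place the zero-shot pipeline on an explicit device to silence the
# "no `device` argument is passed" warning: 0 = first GPU, -1 = CPU.
classifier_bart_large_mnli = pipeline(
    "zero-shot-classification",
    model="facebook/bart-large-mnli",
    device=0,  # change to -1 to force CPU
)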
217
+ {
218
+ "cell_type": "markdown",
219
+ "id": "38805499-9919-40fe-9d42-de6869ba01dc",
220
+ "metadata": {},
221
+ "source": [
222
+ "Try classifiers"
223
+ ]
224
+ },
225
+ {
226
+ "cell_type": "code",
227
+ "execution_count": 38,
228
+ "id": "abb13524-71c6-448d-948d-fb22a0e0ceeb",
229
+ "metadata": {},
230
+ "outputs": [
231
  {
232
+ "name": "stderr",
233
  "output_type": "stream",
234
  "text": [
235
+ "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
236
  ]
237
  }
238
  ],
239
  "source": [
240
+ "# Run the classification (ca 30 seconds classifying)\n",
241
+ "result_bart_base = classifier_bart_base(first_trip, class_labels)"
242
  ]
243
  },
244
  {
245
  "cell_type": "code",
246
  "execution_count": null,
247
+ "id": "116c7ee3-2b59-4623-a416-162c487aab70",
248
  "metadata": {},
249
  "outputs": [],
250
+ "source": [
251
+ "# Run the classification (ca 1 minute classifying)\n",
252
+ "result_bart_large_mnli = classifier_bart_large_mnli(first_trip, class_labels)"
253
+ ]
254
  },
255
  {
256
  "cell_type": "code",
257
  "execution_count": null,
258
+ "id": "8591425b-ce55-4a36-a4b6-70974e8d4e59",
259
  "metadata": {},
260
  "outputs": [],
261
+ "source": [
262
+ "# Creating a table\n",
263
+ "table = zip(result_bart_base[\"labels\"], \n",
264
+ " result_bart_large_mnli[\"labels\"])\n",
265
+ "headers = [\"bart_base\", \"bart_large_mnli\"]\n",
266
+ "\n",
267
+ "print(tabulate(table, headers=headers, tablefmt=\"grid\"))\n"
268
+ ]
269
+ },
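The table above only compares the ranked labels from the two models. To turn a ranking into an actual packing list, one option is to keep every label whose score clears a cutoff and compare that set against the reference list for the trip; a sketch assuming the result_bart_large_mnli and first_trip_items variables from the cells above, with an arbitrary threshold:

# Keep labels whose zero-shot score clears an (arbitrary) cutoff.
THRESHOLD = 0.1  # hypothetical value; tune against the reference packing lists

predicted_items = [
    label
    for label, score in zip(result_bart_large_mnli["labels"],
                            result_bart_large_mnli["scores"])
    if score > THRESHOLD
]

# Rough overlap with the reference list (case-insensitive; note that the
# candidate labels are worded differently from the reference items).
reference = {item.lower() for item in first_trip_items}
matched = [label for label in predicted_items if label.lower() in reference]

print(f"Predicted {len(predicted_items)} items; {len(matched)} appear verbatim in the reference list")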
270
+ {
271
+ "cell_type": "markdown",
272
+ "id": "21a35d0c-9451-433a-b14c-87e8dac21d68",
273
+ "metadata": {},
274
+ "source": [
275
+ "**Try simple prompt engineering**"
276
+ ]
277
  },
278
  {
279
  "cell_type": "code",
 
282
  "metadata": {},
283
  "outputs": [],
284
  "source": [
285
  "# No prompt\n",
286
+ "no_prompt = input_text\n",
287
+ "no_result = classifier(no_prompt, class_labels)\n",
 
288
  "\n",
289
  "# Simple prompt\n",
290
+ "simple_prompt = \"Classify the following text: \" + input_text\n",
291
+ "simple_result = classifier(simple_prompt, class_labels)\n",
292
  "\n",
293
  "# Primed prompt\n",
294
+ "primed_prompt = input_text + \"What are the most important things to pack for the trip?\"\n",
295
+ "primed_result = classifier(primed_prompt, class_labels)"
296
  ]
297
  },
298
  {
 
490
  }
491
  ],
492
  "source": [
493
  "# Creating a table\n",
494
  "table = zip(no_result[\"labels\"], no_result[\"scores\"], \n",
495
  " simple_result[\"labels\"], simple_result[\"scores\"], \n",
 
498
  "\n",
499
  "print(tabulate(table, headers=headers, tablefmt=\"grid\"))\n"
500
  ]
501
  }
502
  ],
503
  "metadata": {
space/space/space/space/space/space/space/space/space/.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -1,25 +1,25 @@
1
- import gradio as gr
2
  from transformers import pipeline
 
3
 
4
- # Initialize the zero-shot classification pipeline
5
  classifier = pipeline("zero-shot-classification", model="facebook/bart-base")
6
 
7
- # Define the classification function
8
- def classify_text(text, labels):
9
- labels = labels.split(",") # Convert the comma-separated string into a list
10
- result = classifier(text, candidate_labels=labels)
11
- return result
12
 
13
- # Set up the Gradio interface
14
- with gr.Blocks() as demo:
15
- gr.Markdown("# Zero-Shot Classification")
16
- text_input = gr.Textbox(label="Input Text")
17
- label_input = gr.Textbox(label="Comma-separated Labels")
18
- output = gr.JSON(label="Result")
19
- classify_button = gr.Button("Classify")
20
 
21
- # Link the button to the classification function
22
- classify_button.click(classify_text, inputs=[text_input, label_input], outputs=output)
 
 
 
 
 
23
 
24
- # Launch the Gradio interface
25
- demo.launch()
 
 
 
1
  from transformers import pipeline
2
+ import gradio as gr
3
 
4
+ # Load the model and create a pipeline for zero-shot classification
5
  classifier = pipeline("zero-shot-classification", model="facebook/bart-base")
6
 
7
+ # Load labels from a txt file
8
+ with open("labels.txt", "r", encoding="utf-8") as f:
9
+ class_labels = [line.strip() for line in f if line.strip()]
 
 
10
 
11
+ # Define the Gradio interface
12
+ def classify(text):
13
+ return classifier(text, class_labels)
 
 
 
 
14
 
15
+ demo = gr.Interface(
16
+ fn=classify,
17
+ inputs="text",
18
+ outputs="json",
19
+ title="Zero-Shot Classification",
20
+ description="Enter a text describing your trip",
21
+ )
22
 
23
+ # Launch the Gradio app
24
+ if __name__ == "__main__":
25
+ demo.launch()
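Once the app is running locally, the endpoint can also be exercised from Python; a small sketch using the gradio_client helper package (the URL and the /predict endpoint name are the defaults printed by demo.launch() and may differ on another machine):

from gradio_client import Client

# Connect to the locally running Gradio app (URL printed by demo.launch()).
client = Client("http://127.0.0.1:7860/")

# Send a trip description to the single Interface endpoint and print the JSON result.
result = client.predict(
    "I like to cycle and I burn easily. I will go on a trip to Italy in July.",
    api_name="/predict",
)
print(result)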
space/space/space/space/space/space/space/space/space/.ipynb_checkpoints/gradio_tryout-checkpoint.ipynb CHANGED
@@ -1,6 +1,186 @@
1
  {
2
- "cells": [],
3
- "metadata": {},
4
  "nbformat": 4,
5
  "nbformat_minor": 5
6
  }
 
1
  {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "e25090fa-f990-4f1a-84f3-b12159eedae8",
6
+ "metadata": {},
7
+ "source": [
8
+ "# Try out gradio"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "markdown",
13
+ "id": "3bbee2e4-55c8-4b06-9929-72026edf7932",
14
+ "metadata": {},
15
+ "source": [
16
+ "Try model"
17
+ ]
18
+ },
19
+ {
20
+ "cell_type": "code",
21
+ "execution_count": 1,
22
+ "id": "fa0d8126-e346-4412-9197-7d51baf868da",
23
+ "metadata": {
24
+ "scrolled": true
25
+ },
26
+ "outputs": [
27
+ {
28
+ "name": "stderr",
29
+ "output_type": "stream",
30
+ "text": [
31
+ "Some weights of BartForSequenceClassification were not initialized from the model checkpoint at facebook/bart-base and are newly initialized: ['classification_head.dense.bias', 'classification_head.dense.weight', 'classification_head.out_proj.bias', 'classification_head.out_proj.weight']\n",
32
+ "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
33
+ "Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.\n",
34
+ "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to -1. Define a descriptive label2id mapping in the model config to ensure correct outputs.\n",
35
+ "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
36
+ ]
37
+ },
38
+ {
39
+ "name": "stdout",
40
+ "output_type": "stream",
41
+ "text": [
42
+ "{'sequence': 'I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.', 'labels': ['Map', 'Compass', 'Laptop', 'Car charger', 'Toys', 'Travel crib', 'Hat', 'Playlist', 'Stroller', 'Currency', 'Travel adapter', 'Hostel lock', 'Pen', 'Charging cables', 'Flip-flops', 'Pacifier', 'Camping stove', 'Multi-tool', 'Passport', 'Poncho', 'Hiking boots', 'Portable charger', 'Power bank', 'Trekking poles', 'Snowboard', 'Base layers', 'Bandana', 'Aloe vera gel', 'Gloves', 'Baby blanket', 'Tent', 'Tent', 'Snorkel gear', 'Water filter', 'Diapers', 'Presentation materials', 'Nursing cover', 'Headphones', 'Sunscreen', 'Beach towel', 'Snacks', 'Ski jacket', 'Earplugs', 'Ski goggles', 'Flashlight', 'Neck wallet', 'Swimsuit', 'Notebook', 'Thermal clothing', 'Blanket', 'Snow boots', 'Sleeping bag', 'Lightweight backpack', 'Refillable water bottle', 'Quick-dry towel', 'Comfortable shoes', 'Reusable shopping bags', 'Travel journal', 'Travel pillow', 'Beach bag', 'Reusable coffee mug', 'Reusable water bottle', 'Festival tickets', 'Waterproof phone case', 'Business attire', 'Sunglasses', 'Sunglasses', 'Cooler', 'Baby clothes', 'Fanny pack', 'Beanie', 'First aid kit', 'Emergency roadside kit', 'Dry bag', 'SIM card', 'Energy bars', 'Baby food', 'Work ID badge', 'Packable rain jacket', 'Hand warmers', 'Visa documents', 'Glow sticks', 'Bug spray', 'Travel-sized toiletries', 'Dress shoes', 'Language phrasebook', 'Baby wipes', 'Lip balm', 'Travel insurance documents'], 'scores': [0.013028442859649658, 0.012909057550132275, 0.0124660674482584, 0.012431488372385502, 0.012379261665046215, 0.012377972714602947, 0.012329353019595146, 0.012096051126718521, 0.012086767703294754, 0.011947661638259888, 0.011939236894249916, 0.011935302056372166, 0.011887168511748314, 0.011814153753221035, 0.011788924224674702, 0.011783207766711712, 0.01177265401929617, 0.011771135963499546, 0.011747810058295727, 0.011738969013094902, 0.01169698778539896, 0.01166312862187624, 0.011658026836812496, 0.011596457101404667, 0.01158847101032734, 0.011561167426407337, 0.011526867747306824, 0.01149983424693346, 0.011472185142338276, 0.011455104686319828, 0.011445573531091213, 0.011445573531091213, 0.011444379575550556, 0.011416648514568806, 0.01136692427098751, 0.011363024823367596, 0.011361461132764816, 0.011328471824526787, 0.011299548670649529, 0.011291779577732086, 0.011282541789114475, 0.01127372495830059, 0.011270811781287193, 0.011263585649430752, 0.011179029010236263, 0.011149592697620392, 0.01113132108002901, 0.011122703552246094, 0.011105425655841827, 0.011101326905190945, 0.011090466752648354, 0.011066330596804619, 0.011058374308049679, 0.011055233888328075, 0.01103114802390337, 0.011022195219993591, 0.011012199334800243, 0.01100123766809702, 0.010985593311488628, 0.010961917228996754, 0.010958753526210785, 0.010938071645796299, 0.010903625749051571, 0.010879918932914734, 0.010863620787858963, 0.010824359022080898, 0.010824359022080898, 0.010805793106555939, 0.010763236321508884, 0.010710005648434162, 0.010690474882721901, 0.010647830553352833, 0.010583569295704365, 0.010571518912911415, 0.010570857673883438, 0.010552200488746166, 0.0105352271348238, 0.010523369535803795, 0.010514546185731888, 0.010479346849024296, 0.010450395755469799, 0.010436479933559895, 0.01043587177991867, 0.010400519706308842, 0.010214710608124733, 0.010052643716335297, 0.010041419416666031, 0.010003888048231602, 0.009946384467184544]}\n"
43
+ ]
44
+ }
45
+ ],
46
+ "source": [
47
+ "from transformers import pipeline\n",
48
+ "import gradio as gr\n",
49
+ "\n",
50
+ "# Load the model and create a pipeline for zero-shot classification\n",
51
+ "classifier = pipeline(\"zero-shot-classification\", model=\"facebook/bart-base\")\n",
52
+ "\n",
53
+ "# Load labels from a txt file\n",
54
+ "with open(\"labels.txt\", \"r\", encoding=\"utf-8\") as f:\n",
55
+ " class_labels = [line.strip() for line in f if line.strip()]\n",
56
+ "\n",
57
+ "# Example text to classify\n",
58
+ "input_text = \"I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.\"\n",
59
+ "\n",
60
+ "# Perform classification\n",
61
+ "result = classifier(input_text, class_labels)\n",
62
+ "\n",
63
+ "print(result)"
64
+ ]
65
+ },
66
+ {
67
+ "cell_type": "markdown",
68
+ "id": "8e856a9c-a66c-4c4b-b7cf-8c52abbbc6fa",
69
+ "metadata": {},
70
+ "source": [
71
+ "Use model with gradio"
72
+ ]
73
+ },
74
+ {
75
+ "cell_type": "code",
76
+ "execution_count": 2,
77
+ "id": "521d9118-b59d-4cc6-b637-20202eaf8f33",
78
+ "metadata": {
79
+ "scrolled": true
80
+ },
81
+ "outputs": [
82
+ {
83
+ "name": "stdout",
84
+ "output_type": "stream",
85
+ "text": [
86
+ "Running on local URL: http://127.0.0.1:7860\n",
87
+ "\n",
88
+ "To create a public link, set `share=True` in `launch()`.\n"
89
+ ]
90
+ },
91
+ {
92
+ "data": {
93
+ "text/html": [
94
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
95
+ ],
96
+ "text/plain": [
97
+ "<IPython.core.display.HTML object>"
98
+ ]
99
+ },
100
+ "metadata": {},
101
+ "output_type": "display_data"
102
+ }
103
+ ],
104
+ "source": [
105
+ "# Define the Gradio interface\n",
106
+ "def classify(text):\n",
107
+ " return classifier(text, class_labels)\n",
108
+ "\n",
109
+ "demo = gr.Interface(\n",
110
+ " fn=classify,\n",
111
+ " inputs=\"text\",\n",
112
+ " outputs=\"json\",\n",
113
+ " title=\"Zero-Shot Classification\",\n",
114
+ " description=\"Enter a text describing your trip\",\n",
115
+ ")\n",
116
+ "\n",
117
+ "# Launch the Gradio app\n",
118
+ "if __name__ == \"__main__\":\n",
119
+ " demo.launch()"
120
+ ]
121
+ },
122
+ {
123
+ "cell_type": "markdown",
124
+ "id": "d6526d18-6ba6-4a66-8310-21337b832d84",
125
+ "metadata": {},
126
+ "source": [
127
+ "Simple app"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": null,
133
+ "id": "5496ded9-7294-4da4-af05-00e5846cdd04",
134
+ "metadata": {},
135
+ "outputs": [],
136
+ "source": [
137
+ "import gradio as gr\n",
138
+ "from transformers import pipeline\n",
139
+ "\n",
140
+ "# Initialize the zero-shot classification pipeline\n",
141
+ "classifier = pipeline(\"zero-shot-classification\", model=\"facebook/bart-base\")\n",
142
+ "\n",
143
+ "# Define the classification function\n",
144
+ "def classify_text(text, labels):\n",
145
+ " labels = labels.split(\",\") # Convert the comma-separated string into a list\n",
146
+ " result = classifier(text, candidate_labels=labels)\n",
147
+ " return result\n",
148
+ "\n",
149
+ "# Set up the Gradio interface\n",
150
+ "with gr.Blocks() as demo:\n",
151
+ " gr.Markdown(\"# Zero-Shot Classification\")\n",
152
+ " text_input = gr.Textbox(label=\"Input Text\")\n",
153
+ " label_input = gr.Textbox(label=\"Comma-separated Labels\")\n",
154
+ " output = gr.JSON(label=\"Result\")\n",
155
+ " classify_button = gr.Button(\"Classify\")\n",
156
+ "\n",
157
+ " # Link the button to the classification function\n",
158
+ " classify_button.click(classify_text, inputs=[text_input, label_input], outputs=output)\n",
159
+ "\n",
160
+ "# Launch the Gradio interface\n",
161
+ "demo.launch()"
162
+ ]
163
+ }
164
+ ],
165
+ "metadata": {
166
+ "kernelspec": {
167
+ "display_name": "Python (huggingface_env)",
168
+ "language": "python",
169
+ "name": "huggingface_env"
170
+ },
171
+ "language_info": {
172
+ "codemirror_mode": {
173
+ "name": "ipython",
174
+ "version": 3
175
+ },
176
+ "file_extension": ".py",
177
+ "mimetype": "text/x-python",
178
+ "name": "python",
179
+ "nbconvert_exporter": "python",
180
+ "pygments_lexer": "ipython3",
181
+ "version": "3.8.20"
182
+ }
183
+ },
184
  "nbformat": 4,
185
  "nbformat_minor": 5
186
  }
space/space/space/space/space/space/space/space/space/app.py CHANGED
@@ -1,25 +1,25 @@
1
- import gradio as gr
2
  from transformers import pipeline
 
3
 
4
- # Initialize the zero-shot classification pipeline
5
  classifier = pipeline("zero-shot-classification", model="facebook/bart-base")
6
 
7
- # Define the classification function
8
- def classify_text(text, labels):
9
- labels = labels.split(",") # Convert the comma-separated string into a list
10
- result = classifier(text, candidate_labels=labels)
11
- return result
12
 
13
- # Set up the Gradio interface
14
- with gr.Blocks() as demo:
15
- gr.Markdown("# Zero-Shot Classification")
16
- text_input = gr.Textbox(label="Input Text")
17
- label_input = gr.Textbox(label="Comma-separated Labels")
18
- output = gr.JSON(label="Result")
19
- classify_button = gr.Button("Classify")
20
 
21
- # Link the button to the classification function
22
- classify_button.click(classify_text, inputs=[text_input, label_input], outputs=output)
 
 
 
 
 
23
 
24
- # Launch the Gradio interface
25
- demo.launch()
 
 
 
1
  from transformers import pipeline
2
+ import gradio as gr
3
 
4
+ # Load the model and create a pipeline for zero-shot classification
5
  classifier = pipeline("zero-shot-classification", model="facebook/bart-base")
6
 
7
+ # Load labels from a txt file
8
+ with open("labels.txt", "r", encoding="utf-8") as f:
9
+ class_labels = [line.strip() for line in f if line.strip()]
 
 
10
 
11
+ # Define the Gradio interface
12
+ def classify(text):
13
+ return classifier(text, class_labels)
 
 
 
 
14
 
15
+ demo = gr.Interface(
16
+ fn=classify,
17
+ inputs="text",
18
+ outputs="json",
19
+ title="Zero-Shot Classification",
20
+ description="Enter a text describing your trip",
21
+ )
22
 
23
+ # Launch the Gradio app
24
+ if __name__ == "__main__":
25
+ demo.launch()
space/space/space/space/space/space/space/space/space/gradio_tryout.ipynb CHANGED
@@ -18,7 +18,7 @@
18
  },
19
  {
20
  "cell_type": "code",
21
- "execution_count": 6,
22
  "id": "fa0d8126-e346-4412-9197-7d51baf868da",
23
  "metadata": {
24
  "scrolled": true
@@ -39,7 +39,7 @@
39
  "name": "stdout",
40
  "output_type": "stream",
41
  "text": [
42
- "{'sequence': 'I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.', 'labels': ['Compass', 'Travel insurance documents', 'Cooler', 'Poncho', 'Comfortable shoes', 'Thermal clothing', 'Business attire', 'Stroller', 'Refillable water bottle', 'Sunscreen', 'Hiking boots', 'Trekking poles', 'Tent', 'Tent', 'Swimsuit', 'Lightweight backpack', 'Diapers', 'Pen', 'Lip balm', 'Bandana', 'Presentation materials', 'Snorkel gear', 'Sunglasses', 'Sunglasses', 'Snowboard', 'Baby wipes', 'Emergency roadside kit', 'Blanket', 'Passport', 'Aloe vera gel', 'Currency', 'Beanie', 'Hand warmers', 'Reusable shopping bags', 'Hat', 'Travel-sized toiletries', 'Waterproof phone case', 'Energy bars', 'Baby food', 'Reusable water bottle', 'Flashlight', 'Gloves', 'Baby clothes', 'Hostel lock', 'Visa documents', 'Camping stove', 'Bug spray', 'Packable rain jacket', 'Travel pillow', 'Power bank', 'Earplugs', 'Quick-dry towel', 'Reusable coffee mug', 'Travel journal', 'Fanny pack', 'Headphones', 'Notebook', 'Dress shoes', 'Nursing cover', 'Playlist', 'Base layers', 'Work ID badge', 'Festival tickets', 'Sleeping bag', 'Laptop', 'Baby blanket', 'Charging cables', 'Snow boots', 'First aid kit', 'Snacks', 'Flip-flops', 'Toys', 'Car charger', 'Ski jacket', 'Dry bag', 'Pacifier', 'Map', 'Portable charger', 'Travel crib', 'Multi-tool', 'Beach bag', 'Ski goggles', 'SIM card', 'Glow sticks', 'Beach towel', 'Travel adapter', 'Neck wallet', 'Language phrasebook', 'Water filter'], 'scores': [0.011984821408987045, 0.011970506981015205, 0.011933253146708012, 0.011915490962564945, 0.011904211714863777, 0.011892491020262241, 0.01188766211271286, 0.011866495944559574, 0.011842762120068073, 0.011789090000092983, 0.011770269833505154, 0.011769718490540981, 0.011746660806238651, 0.011746660806238651, 0.011718676425516605, 0.01164235919713974, 0.011551206931471825, 0.011529732495546341, 0.011518468149006367, 0.011516833677887917, 0.011508049443364143, 0.011507270857691765, 0.01149584911763668, 0.01149584911763668, 0.011495097540318966, 0.01149324607104063, 0.011486946605145931, 0.01148668210953474, 0.011478666216135025, 0.011473646387457848, 0.011412998661398888, 0.011398673988878727, 0.011378799565136433, 0.01135518029332161, 0.011335738934576511, 0.011330211535096169, 0.011329339817166328, 0.011324702762067318, 0.01131915021687746, 0.01131164189428091, 0.011294065974652767, 0.011273612268269062, 0.011272135190665722, 0.011252084746956825, 0.01122584380209446, 0.011216048151254654, 0.011204490438103676, 0.011203117668628693, 0.01117485947906971, 0.01117344293743372, 0.011145292781293392, 0.011137993074953556, 0.011128612793982029, 0.011123239994049072, 0.011122280731797218, 0.011065744794905186, 0.011053262278437614, 0.011045967228710651, 0.011041177436709404, 0.011033336631953716, 0.01102971937507391, 0.0110141197219491, 0.01100961398333311, 0.011002525687217712, 0.010937424376606941, 0.01093329582363367, 0.010918675921857357, 0.010917853564023972, 0.010890142060816288, 0.01088369358330965, 0.010871977545320988, 0.010870742611587048, 0.010863195173442364, 0.010844682343304157, 0.01084016915410757, 0.010835953988134861, 0.010834810324013233, 0.010826902464032173, 0.010796850547194481, 0.010746038518846035, 0.010692491196095943, 0.010686952620744705, 0.010679351165890694, 0.010655333288013935, 0.010604050010442734, 0.010574583895504475, 0.010439733043313026, 0.010402928106486797, 0.010294477455317974]}\n"
43
  ]
44
  }
45
  ],
@@ -73,15 +73,17 @@
73
  },
74
  {
75
  "cell_type": "code",
76
- "execution_count": 13,
77
  "id": "521d9118-b59d-4cc6-b637-20202eaf8f33",
78
- "metadata": {},
 
 
79
  "outputs": [
80
  {
81
  "name": "stdout",
82
  "output_type": "stream",
83
  "text": [
84
- "Running on local URL: http://127.0.0.1:7866\n",
85
  "\n",
86
  "To create a public link, set `share=True` in `launch()`.\n"
87
  ]
@@ -89,7 +91,7 @@
89
  {
90
  "data": {
91
  "text/html": [
92
- "<div><iframe src=\"http://127.0.0.1:7866/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
93
  ],
94
  "text/plain": [
95
  "<IPython.core.display.HTML object>"
@@ -116,6 +118,48 @@
116
  "if __name__ == \"__main__\":\n",
117
  " demo.launch()"
118
  ]
119
  }
120
  ],
121
  "metadata": {
 
18
  },
19
  {
20
  "cell_type": "code",
21
+ "execution_count": 1,
22
  "id": "fa0d8126-e346-4412-9197-7d51baf868da",
23
  "metadata": {
24
  "scrolled": true
 
39
  "name": "stdout",
40
  "output_type": "stream",
41
  "text": [
42
+ "{'sequence': 'I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.', 'labels': ['Map', 'Compass', 'Laptop', 'Car charger', 'Toys', 'Travel crib', 'Hat', 'Playlist', 'Stroller', 'Currency', 'Travel adapter', 'Hostel lock', 'Pen', 'Charging cables', 'Flip-flops', 'Pacifier', 'Camping stove', 'Multi-tool', 'Passport', 'Poncho', 'Hiking boots', 'Portable charger', 'Power bank', 'Trekking poles', 'Snowboard', 'Base layers', 'Bandana', 'Aloe vera gel', 'Gloves', 'Baby blanket', 'Tent', 'Tent', 'Snorkel gear', 'Water filter', 'Diapers', 'Presentation materials', 'Nursing cover', 'Headphones', 'Sunscreen', 'Beach towel', 'Snacks', 'Ski jacket', 'Earplugs', 'Ski goggles', 'Flashlight', 'Neck wallet', 'Swimsuit', 'Notebook', 'Thermal clothing', 'Blanket', 'Snow boots', 'Sleeping bag', 'Lightweight backpack', 'Refillable water bottle', 'Quick-dry towel', 'Comfortable shoes', 'Reusable shopping bags', 'Travel journal', 'Travel pillow', 'Beach bag', 'Reusable coffee mug', 'Reusable water bottle', 'Festival tickets', 'Waterproof phone case', 'Business attire', 'Sunglasses', 'Sunglasses', 'Cooler', 'Baby clothes', 'Fanny pack', 'Beanie', 'First aid kit', 'Emergency roadside kit', 'Dry bag', 'SIM card', 'Energy bars', 'Baby food', 'Work ID badge', 'Packable rain jacket', 'Hand warmers', 'Visa documents', 'Glow sticks', 'Bug spray', 'Travel-sized toiletries', 'Dress shoes', 'Language phrasebook', 'Baby wipes', 'Lip balm', 'Travel insurance documents'], 'scores': [0.013028442859649658, 0.012909057550132275, 0.0124660674482584, 0.012431488372385502, 0.012379261665046215, 0.012377972714602947, 0.012329353019595146, 0.012096051126718521, 0.012086767703294754, 0.011947661638259888, 0.011939236894249916, 0.011935302056372166, 0.011887168511748314, 0.011814153753221035, 0.011788924224674702, 0.011783207766711712, 0.01177265401929617, 0.011771135963499546, 0.011747810058295727, 0.011738969013094902, 0.01169698778539896, 0.01166312862187624, 0.011658026836812496, 0.011596457101404667, 0.01158847101032734, 0.011561167426407337, 0.011526867747306824, 0.01149983424693346, 0.011472185142338276, 0.011455104686319828, 0.011445573531091213, 0.011445573531091213, 0.011444379575550556, 0.011416648514568806, 0.01136692427098751, 0.011363024823367596, 0.011361461132764816, 0.011328471824526787, 0.011299548670649529, 0.011291779577732086, 0.011282541789114475, 0.01127372495830059, 0.011270811781287193, 0.011263585649430752, 0.011179029010236263, 0.011149592697620392, 0.01113132108002901, 0.011122703552246094, 0.011105425655841827, 0.011101326905190945, 0.011090466752648354, 0.011066330596804619, 0.011058374308049679, 0.011055233888328075, 0.01103114802390337, 0.011022195219993591, 0.011012199334800243, 0.01100123766809702, 0.010985593311488628, 0.010961917228996754, 0.010958753526210785, 0.010938071645796299, 0.010903625749051571, 0.010879918932914734, 0.010863620787858963, 0.010824359022080898, 0.010824359022080898, 0.010805793106555939, 0.010763236321508884, 0.010710005648434162, 0.010690474882721901, 0.010647830553352833, 0.010583569295704365, 0.010571518912911415, 0.010570857673883438, 0.010552200488746166, 0.0105352271348238, 0.010523369535803795, 0.010514546185731888, 0.010479346849024296, 0.010450395755469799, 0.010436479933559895, 0.01043587177991867, 0.010400519706308842, 0.010214710608124733, 0.010052643716335297, 0.010041419416666031, 0.010003888048231602, 0.009946384467184544]}\n"
43
  ]
44
  }
45
  ],
 
73
  },
74
  {
75
  "cell_type": "code",
76
+ "execution_count": 2,
77
  "id": "521d9118-b59d-4cc6-b637-20202eaf8f33",
78
+ "metadata": {
79
+ "scrolled": true
80
+ },
81
  "outputs": [
82
  {
83
  "name": "stdout",
84
  "output_type": "stream",
85
  "text": [
86
+ "Running on local URL: http://127.0.0.1:7860\n",
87
  "\n",
88
  "To create a public link, set `share=True` in `launch()`.\n"
89
  ]
 
91
  {
92
  "data": {
93
  "text/html": [
94
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
95
  ],
96
  "text/plain": [
97
  "<IPython.core.display.HTML object>"
 
118
  "if __name__ == \"__main__\":\n",
119
  " demo.launch()"
120
  ]
121
+ },
122
+ {
123
+ "cell_type": "markdown",
124
+ "id": "d6526d18-6ba6-4a66-8310-21337b832d84",
125
+ "metadata": {},
126
+ "source": [
127
+ "Simple app"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": null,
133
+ "id": "5496ded9-7294-4da4-af05-00e5846cdd04",
134
+ "metadata": {},
135
+ "outputs": [],
136
+ "source": [
137
+ "import gradio as gr\n",
138
+ "from transformers import pipeline\n",
139
+ "\n",
140
+ "# Initialize the zero-shot classification pipeline\n",
141
+ "classifier = pipeline(\"zero-shot-classification\", model=\"facebook/bart-base\")\n",
142
+ "\n",
143
+ "# Define the classification function\n",
144
+ "def classify_text(text, labels):\n",
145
+ " labels = labels.split(\",\") # Convert the comma-separated string into a list\n",
146
+ " result = classifier(text, candidate_labels=labels)\n",
147
+ " return result\n",
148
+ "\n",
149
+ "# Set up the Gradio interface\n",
150
+ "with gr.Blocks() as demo:\n",
151
+ " gr.Markdown(\"# Zero-Shot Classification\")\n",
152
+ " text_input = gr.Textbox(label=\"Input Text\")\n",
153
+ " label_input = gr.Textbox(label=\"Comma-separated Labels\")\n",
154
+ " output = gr.JSON(label=\"Result\")\n",
155
+ " classify_button = gr.Button(\"Classify\")\n",
156
+ "\n",
157
+ " # Link the button to the classification function\n",
158
+ " classify_button.click(classify_text, inputs=[text_input, label_input], outputs=output)\n",
159
+ "\n",
160
+ "# Launch the Gradio interface\n",
161
+ "demo.launch()"
162
+ ]
163
  }
164
  ],
165
  "metadata": {
space/space/space/space/space/space/space/space/space/packing_list_api.ipynb CHANGED
@@ -10,7 +10,7 @@
10
  },
11
  {
12
  "cell_type": "code",
13
- "execution_count": 12,
14
  "id": "05a29daa-b70e-4c7c-ba03-9ab641f424cb",
15
  "metadata": {},
16
  "outputs": [],
@@ -35,7 +35,7 @@
35
  },
36
  {
37
  "cell_type": "code",
38
- "execution_count": 13,
39
  "id": "21b4f8b6-e774-45ad-8054-bf5db2b7b07c",
40
  "metadata": {},
41
  "outputs": [
@@ -69,7 +69,7 @@
69
  },
70
  {
71
  "cell_type": "code",
72
- "execution_count": 14,
73
  "id": "c5f75916-aaf2-4ca7-8d1a-070579940952",
74
  "metadata": {},
75
  "outputs": [
@@ -114,66 +114,6 @@
114
  "print(output)"
115
  ]
116
  },
117
- {
118
- "cell_type": "markdown",
119
- "id": "8a6318c1-fa5f-4d16-8507-eaebe6294ac0",
120
- "metadata": {},
121
- "source": [
122
- "**Use batches of 10 labels and combine results**"
123
- ]
124
- },
125
- {
126
- "cell_type": "code",
127
- "execution_count": 16,
128
- "id": "fe42a222-5ff4-4442-93f4-42fc22001af6",
129
- "metadata": {},
130
- "outputs": [
131
- {
132
- "name": "stdout",
133
- "output_type": "stream",
134
- "text": [
135
- "{'sequence': \"I'm going on a 2-week hiking trip in the Alps during winter.\", 'labels': ['Map', 'Backpack', 'Tent', 'Thermal clothing', 'Hiking boots', 'Flashlight', 'Gloves', 'Camping stove', 'Water filter', 'Sleeping bag'], 'scores': [0.30358555912971497, 0.12884855270385742, 0.10985139012336731, 0.10500500351190567, 0.10141848027706146, 0.08342219144105911, 0.0704946368932724, 0.05127469450235367, 0.024876652285456657, 0.021222807466983795]}\n",
136
- "{'sequence': \"I'm going on a 2-week hiking trip in the Alps during winter.\", 'labels': ['Ski jacket', 'Snow boots', 'Hand warmers', 'Beanie', 'Ski goggles', 'Flip-flops', 'First aid kit', 'Sunscreen', 'Swimsuit', 'Lip balm'], 'scores': [0.20171622931957245, 0.1621972620487213, 0.12313881516456604, 0.10742709040641785, 0.09418268501758575, 0.08230196684598923, 0.07371978461742401, 0.06208840385079384, 0.05506424233317375, 0.038163457065820694]}\n",
137
- "\n",
138
- "Recommended packing list: ['Map', 'Backpack', 'Tent', 'Thermal clothing', 'Hiking boots', 'Ski jacket', 'Snow boots', 'Hand warmers', 'Beanie']\n"
139
- ]
140
- }
141
- ],
142
- "source": [
143
- "\n",
144
- "input_text = \"I'm going on a 2-week hiking trip in the Alps during winter.\"\n",
145
- "\n",
146
- "\n",
147
- "# Define the full list of possible packing items (split into groups of 10)\n",
148
- "candidate_labels = [\n",
149
- " [\"Hiking boots\", \"Tent\", \"Sleeping bag\", \"Camping stove\", \"Backpack\",\n",
150
- " \"Water filter\", \"Flashlight\", \"Thermal clothing\", \"Gloves\", \"Map\"],\n",
151
- " \n",
152
- " [\"Swimsuit\", \"Sunscreen\", \"Flip-flops\", \"Ski jacket\", \"Ski goggles\",\n",
153
- " \"Snow boots\", \"Beanie\", \"Hand warmers\", \"Lip balm\", \"First aid kit\"]\n",
154
- "]\n",
155
- "\n",
156
- "# Run classification in batches\n",
157
- "packing_list = []\n",
158
- "for batch in candidate_labels:\n",
159
- " result = query({\"inputs\": input_text, \"parameters\": {\"candidate_labels\": batch}})\n",
160
- " print(result)\n",
161
- " for label, score in zip(result[\"labels\"], result[\"scores\"]):\n",
162
- " if score > 0.1: # Adjust threshold as needed\n",
163
- " packing_list.append(label)\n",
164
- "\n",
165
- "# Print the final packing list\n",
166
- "print(\"\\nRecommended packing list:\", packing_list)"
167
- ]
168
- },
169
- {
170
- "cell_type": "code",
171
- "execution_count": null,
172
- "id": "660072ea-b72f-4bee-a9ed-81019775ae85",
173
- "metadata": {},
174
- "outputs": [],
175
- "source": []
176
- },
177
  {
178
  "cell_type": "markdown",
179
  "id": "edf44387-d166-4e0f-a8ad-621230aee115",
@@ -184,92 +124,16 @@
184
  },
185
  {
186
  "cell_type": "code",
187
- "execution_count": 1,
188
  "id": "d0d8f7c0-c2d9-4fbe-b1a7-699a5b99466c",
189
  "metadata": {},
190
  "outputs": [
191
- {
192
- "data": {
193
- "application/vnd.jupyter.widget-view+json": {
194
- "model_id": "5e371dee58d64e7b8bf6635e0e88f8db",
195
- "version_major": 2,
196
- "version_minor": 0
197
- },
198
- "text/plain": [
199
- "config.json: 0%| | 0.00/1.72k [00:00<?, ?B/s]"
200
- ]
201
- },
202
- "metadata": {},
203
- "output_type": "display_data"
204
- },
205
- {
206
- "data": {
207
- "application/vnd.jupyter.widget-view+json": {
208
- "model_id": "d479e18a65314ad5927ea2bf7453db7c",
209
- "version_major": 2,
210
- "version_minor": 0
211
- },
212
- "text/plain": [
213
- "model.safetensors: 0%| | 0.00/558M [00:00<?, ?B/s]"
214
- ]
215
- },
216
- "metadata": {},
217
- "output_type": "display_data"
218
- },
219
  {
220
  "name": "stderr",
221
  "output_type": "stream",
222
  "text": [
223
  "Some weights of BartForSequenceClassification were not initialized from the model checkpoint at facebook/bart-base and are newly initialized: ['classification_head.dense.bias', 'classification_head.dense.weight', 'classification_head.out_proj.bias', 'classification_head.out_proj.weight']\n",
224
- "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
225
- ]
226
- },
227
- {
228
- "data": {
229
- "application/vnd.jupyter.widget-view+json": {
230
- "model_id": "8dc911686edb4b15baa880ae657c163d",
231
- "version_major": 2,
232
- "version_minor": 0
233
- },
234
- "text/plain": [
235
- "vocab.json: 0%| | 0.00/899k [00:00<?, ?B/s]"
236
- ]
237
- },
238
- "metadata": {},
239
- "output_type": "display_data"
240
- },
241
- {
242
- "data": {
243
- "application/vnd.jupyter.widget-view+json": {
244
- "model_id": "e60a6df28292441bb5317ef80c9de795",
245
- "version_major": 2,
246
- "version_minor": 0
247
- },
248
- "text/plain": [
249
- "merges.txt: 0%| | 0.00/456k [00:00<?, ?B/s]"
250
- ]
251
- },
252
- "metadata": {},
253
- "output_type": "display_data"
254
- },
255
- {
256
- "data": {
257
- "application/vnd.jupyter.widget-view+json": {
258
- "model_id": "c7eaab50789b42a796d0deb3008f247e",
259
- "version_major": 2,
260
- "version_minor": 0
261
- },
262
- "text/plain": [
263
- "tokenizer.json: 0%| | 0.00/1.36M [00:00<?, ?B/s]"
264
- ]
265
- },
266
- "metadata": {},
267
- "output_type": "display_data"
268
- },
269
- {
270
- "name": "stderr",
271
- "output_type": "stream",
272
- "text": [
273
  "Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.\n",
274
  "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to -1. Define a descriptive label2id mapping in the model config to ensure correct outputs.\n"
275
  ]
@@ -284,7 +148,7 @@
284
  },
285
  {
286
  "cell_type": "code",
287
- "execution_count": 2,
288
  "id": "4682d620-c9a6-40ad-ab4c-268ee0ef7212",
289
  "metadata": {},
290
  "outputs": [
@@ -299,7 +163,7 @@
299
  "name": "stdout",
300
  "output_type": "stream",
301
  "text": [
302
- "{'sequence': 'I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.', 'labels': ['Travel-sized toiletries', 'Refillable water bottle', 'Aloe vera gel', 'Snorkel gear', 'Waterproof phone case', 'Packable rain jacket', 'Reusable shopping bags', 'Reusable coffee mug', 'Reusable water bottle', 'First aid kit', 'Travel insurance documents', 'Work ID badge', 'Lightweight backpack', 'Presentation materials', 'Flip-flops', 'Charging cables', 'Hiking boots', 'Comfortable shoes', 'Fanny pack', 'Trekking poles', 'Visa documents', 'Baby wipes', 'Quick-dry towel', 'Baby blanket', 'Hostel lock', 'Blanket', 'Business attire', 'Laptop', 'Beanie', 'Bug spray', 'Travel pillow', 'Baby clothes', 'Passport', 'Earplugs', 'Camping stove', 'Travel journal', 'Emergency roadside kit', 'Baby food', 'Pen', 'Bandana', 'Dress shoes', 'Snacks', 'Travel crib', 'Sunscreen', 'Ski goggles', 'Sunglasses', 'Sunglasses', 'Stroller', 'Lip balm', 'Notebook', 'Glow sticks', 'Cooler', 'Snowboard', 'Map', 'Thermal clothing', 'Neck wallet', 'Water filter', 'Travel adapter', 'Currency', 'Nursing cover', 'Snow boots', 'Pacifier', 'Sleeping bag', 'Car charger', 'Diapers', 'Flashlight', 'Ski jacket', 'Portable charger', 'Playlist', 'Swimsuit', 'Tent', 'Tent', 'SIM card', 'Compass', 'Multi-tool', 'Hat', 'Base layers', 'Energy bars', 'Toys', 'Power bank', 'Dry bag', 'Beach towel', 'Beach bag', 'Poncho', 'Headphones', 'Gloves', 'Festival tickets', 'Hand warmers', 'Language phrasebook'], 'scores': [0.014162097126245499, 0.013634984381496906, 0.013528786599636078, 0.013522890396416187, 0.013521893881261349, 0.013390542939305305, 0.013313423842191696, 0.01292099617421627, 0.01269496325403452, 0.01249685138463974, 0.012418625876307487, 0.012351310811936855, 0.012286719866096973, 0.012170663103461266, 0.01216645073145628, 0.012136084027588367, 0.012111806310713291, 0.01203493494540453, 0.011913969181478024, 0.011860690079629421, 0.01184084452688694, 0.011729727499186993, 0.0116303451359272, 0.011585962027311325, 0.011557267978787422, 0.011486714705824852, 0.011480122804641724, 0.011266479268670082, 0.011243777349591255, 0.011239712126553059, 0.011195540428161621, 0.011194570921361446, 0.01118150819092989, 0.011168110184371471, 0.011141857132315636, 0.01114004384726286, 0.011128030717372894, 0.0110848443582654, 0.01107991486787796, 0.01107126846909523, 0.011069754138588905, 0.011015287600457668, 0.01101327408105135, 0.010999458841979504, 0.010981021448969841, 0.010975920595228672, 0.010975920595228672, 0.010966054163873196, 0.010964509099721909, 0.01093060988932848, 0.010892837308347225, 0.010852692648768425, 0.010844447650015354, 0.010827522724866867, 0.010805405676364899, 0.010789167135953903, 0.010784591548144817, 0.010779209434986115, 0.010761956684291363, 0.010743752121925354, 0.010727204382419586, 0.010722712613642216, 0.010696588084101677, 0.01069594919681549, 0.010669016279280186, 0.010664715431630611, 0.010641842149198055, 0.01063066441565752, 0.010608346201479435, 0.010583184659481049, 0.010549037717282772, 0.010549037717282772, 0.010522513650357723, 0.010509520769119263, 0.010469724424183369, 0.010431424714624882, 0.010407780297100544, 0.010376540012657642, 0.01036670058965683, 0.010329049080610275, 0.010298855602741241, 0.01027328334748745, 0.010225902311503887, 0.010063442401587963, 0.01005304791033268, 0.010049044154584408, 0.009841262362897396, 0.009678435511887074, 0.009306504391133785]}\n"
303
  ]
304
  }
305
  ],
 
10
  },
11
  {
12
  "cell_type": "code",
13
+ "execution_count": 2,
14
  "id": "05a29daa-b70e-4c7c-ba03-9ab641f424cb",
15
  "metadata": {},
16
  "outputs": [],
 
35
  },
36
  {
37
  "cell_type": "code",
38
+ "execution_count": 3,
39
  "id": "21b4f8b6-e774-45ad-8054-bf5db2b7b07c",
40
  "metadata": {},
41
  "outputs": [
 
69
  },
70
  {
71
  "cell_type": "code",
72
+ "execution_count": 4,
73
  "id": "c5f75916-aaf2-4ca7-8d1a-070579940952",
74
  "metadata": {},
75
  "outputs": [
 
114
  "print(output)"
115
  ]
116
  },
117
  {
118
  "cell_type": "markdown",
119
  "id": "edf44387-d166-4e0f-a8ad-621230aee115",
 
124
  },
125
  {
126
  "cell_type": "code",
127
+ "execution_count": 5,
128
  "id": "d0d8f7c0-c2d9-4fbe-b1a7-699a5b99466c",
129
  "metadata": {},
130
  "outputs": [
 
131
  {
132
  "name": "stderr",
133
  "output_type": "stream",
134
  "text": [
135
  "Some weights of BartForSequenceClassification were not initialized from the model checkpoint at facebook/bart-base and are newly initialized: ['classification_head.dense.bias', 'classification_head.dense.weight', 'classification_head.out_proj.bias', 'classification_head.out_proj.weight']\n",
136
+ "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
 
137
  "Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.\n",
138
  "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to -1. Define a descriptive label2id mapping in the model config to ensure correct outputs.\n"
139
  ]
 
148
  },
149
  {
150
  "cell_type": "code",
151
+ "execution_count": 6,
152
  "id": "4682d620-c9a6-40ad-ab4c-268ee0ef7212",
153
  "metadata": {},
154
  "outputs": [
 
163
  "name": "stdout",
164
  "output_type": "stream",
165
  "text": [
166
+ "{'sequence': 'I like to cycle and I burn easily. I also love culture and like to post on social media about my food. I will go on a trip to italy in july.', 'labels': ['Swimsuit', 'Travel crib', 'Business attire', 'Toys', 'Notebook', 'Travel adapter', 'Compass', 'Travel pillow', 'Headphones', 'Travel journal', 'Playlist', 'Flip-flops', 'Hiking boots', 'Reusable coffee mug', 'Comfortable shoes', 'Nursing cover', 'Gloves', 'Tent', 'Tent', 'Sunglasses', 'Sunglasses', 'Charging cables', 'Travel-sized toiletries', 'Refillable water bottle', 'Energy bars', 'Dress shoes', 'Festival tickets', 'Lightweight backpack', 'Packable rain jacket', 'Flashlight', 'Hostel lock', 'Presentation materials', 'Thermal clothing', 'Snowboard', 'Camping stove', 'Reusable shopping bags', 'Reusable water bottle', 'Blanket', 'Diapers', 'Snorkel gear', 'Snacks', 'Emergency roadside kit', 'Beach towel', 'Sunscreen', 'Car charger', 'Bug spray', 'Passport', 'Currency', 'Beach bag', 'Ski jacket', 'First aid kit', 'Cooler', 'Quick-dry towel', 'Laptop', 'Aloe vera gel', 'Earplugs', 'Baby wipes', 'Ski goggles', 'Travel insurance documents', 'Portable charger', 'Beanie', 'Bandana', 'Multi-tool', 'Pacifier', 'Stroller', 'Language phrasebook', 'Waterproof phone case', 'Dry bag', 'Map', 'Lip balm', 'Fanny pack', 'Trekking poles', 'Power bank', 'Baby clothes', 'Baby food', 'Poncho', 'Sleeping bag', 'Work ID badge', 'Visa documents', 'SIM card', 'Water filter', 'Snow boots', 'Hand warmers', 'Baby blanket', 'Base layers', 'Pen', 'Hat', 'Neck wallet', 'Glow sticks'], 'scores': [0.012542711570858955, 0.012216676957905293, 0.012068654410541058, 0.011977529153227806, 0.011932261288166046, 0.011920000426471233, 0.011883101426064968, 0.011842883192002773, 0.011819617822766304, 0.011810989119112492, 0.011761271394789219, 0.011756575666368008, 0.011726364493370056, 0.011664840392768383, 0.011632450856268406, 0.01163020171225071, 0.01158054918050766, 0.011572858318686485, 0.011572858318686485, 0.011541635729372501, 0.011541635729372501, 0.011517350561916828, 0.011510960757732391, 0.011489875614643097, 0.011469963937997818, 0.011466587893664837, 0.011442759074270725, 0.011438597925007343, 0.011437375098466873, 0.011433145962655544, 0.011407203041017056, 0.011401104740798473, 0.01135423593223095, 0.011333385482430458, 0.011328010819852352, 0.011325137689709663, 0.01131997536867857, 0.011306566186249256, 0.011299673467874527, 0.011281789280474186, 0.011264320462942123, 0.011257764883339405, 0.011256475001573563, 0.011253912933170795, 0.011252702213823795, 0.011248898692429066, 0.011247594840824604, 0.011239985004067421, 0.01121864840388298, 0.011208567768335342, 0.011174682527780533, 0.011166973039507866, 0.011159253306686878, 0.011151333339512348, 0.011140624061226845, 0.011139076203107834, 0.01113345380872488, 0.011126152239739895, 0.011093570850789547, 0.011078842915594578, 0.011067545972764492, 0.011044573038816452, 0.01101986039429903, 0.011016158387064934, 0.011015082709491253, 0.011007890105247498, 0.010997296310961246, 0.010962157510221004, 0.01095755398273468, 0.010940180160105228, 0.01088095735758543, 0.010869039222598076, 0.010858545079827309, 0.010820968076586723, 0.01080892514437437, 0.010798529721796513, 0.01077410951256752, 0.010764310136437416, 0.010748079977929592, 0.010681436397135258, 0.010675576515495777, 0.010557047091424465, 0.010552684776484966, 0.010509641841053963, 0.010396942496299744, 0.01037551462650299, 0.01033466774970293, 0.010237698443233967, 0.009954877197742462]}\n"
167
  ]
168
  }
169
  ],