Datasets:
Tasks:
Token Classification
Sub-tasks:
named-entity-recognition
Languages:
English
Size:
10K<n<100K
License:
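
A minimal sketch of loading this dataset with the `datasets` library (the repo id is taken from the `_URL` defined in the script below; split names come from `_split_generators`):

from datasets import load_dataset

# Returns a DatasetDict with "training", "validation", and "production" splits
dataset = load_dataset("arize-ai/xtreme_en")
print(dataset)
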
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
| """IMDb movie revies dataset mixed with Trip Advisor Hotel Reviews to simulate drift accross time.""" | |
import csv

import datasets
# TODO: Add BibTeX citation to our BLOG
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = ""
# _CITATION = """\
# @InProceedings{huggingface:dataset,
# title = {A great new dataset},
# author={huggingface, Inc.
# },
# year={2020}
# }
# """

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This dataset was crafted to be used in our tutorial [Link to the tutorial when
ready]. It consists of product reviews from an e-commerce store. The reviews
are labeled on a scale from 1 to 5 (stars). The training and validation sets are
composed entirely of reviews written in English. However, the production set has
some reviews written in Spanish. At Arize, we work to surface this issue and
help you solve it.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = "https://huggingface.co/datasets/arize-ai/xtreme_en/resolve/main/"
_URLS = {
    "training": _URL + "training.csv",
    "validation": _URL + "validation.csv",
    "production": _URL + "production.csv",
}
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class XtremeEn(datasets.GeneratorBasedBuilder):
    """Token-classification dataset with NER tags and training, validation, and production splits."""
    VERSION = datasets.Version("1.0.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options
    # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description="Default"),
    ]

    DEFAULT_CONFIG_NAME = "default"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
    def _info(self):
        # This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
        features = datasets.Features(
            # These are the features of your dataset like images, labels ...
            {
                "prediction_ts": datasets.Value("float"),
                "language": datasets.Value("string"),
                "split_text": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
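
    # Note: ClassLabel stores each tag as an integer id; the string names above can
    # be recovered through the public ClassLabel API, e.g. (illustrative call only):
    #   self.info.features["ner_tags"].feature.int2str(1)  # -> "B-PER"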
    def _split_generators(self, dl_manager):
        # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
        extracted_paths = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split("training"),
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": extracted_paths["training"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("validation"),
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": extracted_paths["validation"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("production"),
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": extracted_paths["production"],
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath):
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
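        # Each file is expected to be tab-separated with a header row; tokens in the
        # text column are joined by ":-:" and tags are space-separated inside brackets.
        # A hypothetical row (values are illustrative, not taken from the data):
        #   1650000000.0    english    John:-:visited:-:Paris    [B-PER O B-LOC]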
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter="\t")
            for id_, row in enumerate(csv_reader):
                if id_ == 0:  # Skip headers before unpacking the row
                    continue
                prediction_ts, language, text, ner_tags = row
                # Tags are serialized like "[O B-PER ...]"; strip the brackets and split on spaces
                ner_tags_list = ner_tags.strip("[]").split(" ")
                # Tokens are joined with the ":-:" separator in the text column
                tokens = text.split(":-:")
                yield id_, {
                    "prediction_ts": prediction_ts,
                    "language": language,
                    "split_text": tokens,
                    "ner_tags": ner_tags_list,
                }
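
Once the script is on the Hub, a quick way to sanity-check the loader (a sketch: the repo id comes from `_URL` above, the split names from `_split_generators`, and `int2str` is the standard ClassLabel decoder; the printed values depend on the actual data):

from datasets import load_dataset

dataset = load_dataset("arize-ai/xtreme_en")
example = dataset["training"][0]
# Decode the integer tag ids back to their string names
names = dataset["training"].features["ner_tags"].feature.int2str(example["ner_tags"])
print(example["split_text"], names)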