Tasks: Table to Text
Modalities: Text
Languages: English
Size: 10K - 100K
Tags: data-to-text
License:

# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLB data-to-text dataset: Major League Baseball game statistics paired with human-written game summaries."""

import json
import os
import re

import datasets

_CITATION = """\
@inproceedings{puduppully-etal-2019-data,
    title = "Data-to-text Generation with Entity Modeling",
    author = "Puduppully, Ratish and
      Dong, Li and
      Lapata, Mirella",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P19-1195",
    doi = "10.18653/v1/P19-1195",
    pages = "2023--2035",
}
"""

_DESCRIPTION = """\
The MLB dataset for data-to-text generation contains Major League Baseball game statistics and
their human-written summaries.
"""

_HOMEPAGE = "https://github.com/ratishsp/mlb-data-scripts"

_LICENSE = ""

_URL = "data.zip"
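# The archive referenced by _URL is expected to contain data/train.jsonl,
# data/validation.jsonl and data/test.jsonl (see _split_generators below).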

team_verbalization_map = {"team_errors": "<TEAM_ERRORS>", "team_hits": "<TEAM_HITS>", "team_runs": "<TEAM_RUNS>"}

pitcher_verbalization_map = {"p_bb": "<PITCH-BASE-ON-BALLS>", "p_er": "<EARNED-RUN>", "p_era": "<EARNED-RUN-AVG>",
                             "p_h": "<PITCH-HITS>", "p_hr": "<PITCH-HOME-RUN>", "p_l": "<PITCH-LOSS>",
                             "p_loss": "<PITCH-LOSING-PITCHER>", "p_s": "<PITCH-STRIKES-THROWN>",
                             "p_np": "<PITCH-COUNT>", "p_r": "<PITCH-RUNS>", "p_save": "<PITCH-SAVING-PITCHER>",
                             "p_so": "<PITCH-STRIKE-OUT>", "p_bf": "<PITCH-BATTERS-FACED>",
                             "p_bs": "<PITCH-BLOWN-SAVE>",
                             "p_sv": "<PITCH-SAVE>", "p_w": "<PITCH-WIN>", "p_ip1": "<INNINGS-PITCHED-1>",
                             "p_ip2": "<INNINGS-PITCHED-2>", "p_win": "<PITCH-WINNING-PITCHER>",
                             "p_out": "<PITCH-OUT>"}

batter_verbalization_map = {"h": "<HITS>", "r": "<RUNS>", "hr": "<HOME-RUN>", "ab": "<ATBAT>", "avg": "<AVG>",
                            "rbi": "<RBI>", "cs": "<CAUGHT-STEAL>", "hbp": "<HIT-BY-PITCH>", "a": "<ASSIST>",
                            "bb": "<BASE-ON-BALL>", "e": "<ERROR>", "obp": "<ON-BASE-PCT>", "po": "<PUTOUT>",
                            "pos": "<POS>", "sb": "<STOLEN-BASE>", "sf": "<SAC-FLY>", "slg": "<SLUG>",
                            "so": "<STRIKEOUT>"}

pbyp_verbalization_map = {"o": "<PBYP-OUTS>", "b": "<PBYP-BALLS>", "s": "<PBYP-STRIKES>", "b1": "<PBYP-B1>",
                          "b2": "<PBYP-B2>", "b3": "<PBYP-B3>", "batter": "<PBYP-BATTER>",
                          "pitcher": "<PBYP-PITCHER>",
                          "scorers": "<PBYP-SCORERS>", "event": "<PBYP-EVENT>", "event2": "<PBYP-EVENT2>",
                          "fielder_error": "<PBYP-FIELDER-ERROR>", "runs": "<PBYP-RUNS>", "rbi": "<PBYP-RBI>",
                          "error_runs": "<PBYP-ERROR-RUNS>", "top": "<TOP>", "bottom": "<BOTTOM>"}

player_verbalization_map = dict(pitcher_verbalization_map, **batter_verbalization_map)
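
# These maps translate raw box-score and play-by-play keys into the special
# tokens emitted by linearize_input() below; e.g. a batter record with
# "hr": "2" is verbalized as "<HOME-RUN> 2".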


class MlbDataToText(datasets.GeneratorBasedBuilder):
    """MLB dataset for data to text generation"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        features = datasets.Features(
            {
                "home_name": datasets.Value("string"),
                "box_score": [
                    {
                        "p_l": datasets.Value("string"),
                        "last_name": datasets.Value("string"),
                        "p_h": datasets.Value("string"),
                        "sac": datasets.Value("string"),
                        "p_bb": datasets.Value("string"),
                        "pos": datasets.Value("string"),
                        "ao": datasets.Value("string"),
                        "p_bf": datasets.Value("string"),
                        "cs": datasets.Value("string"),
                        "hbp": datasets.Value("string"),
                        "ab": datasets.Value("string"),
                        "full_name": datasets.Value("string"),
                        "p_w": datasets.Value("string"),
                        "go": datasets.Value("string"),
                        "fldg": datasets.Value("string"),
                        "p_bs": datasets.Value("string"),
                        "avg": datasets.Value("string"),
                        "p_r": datasets.Value("string"),
                        "p_s": datasets.Value("string"),
                        "lob": datasets.Value("string"),
                        "first_name": datasets.Value("string"),
                        "p_sv": datasets.Value("string"),
                        "p_so": datasets.Value("string"),
                        "p_save": datasets.Value("string"),
                        "p_hr": datasets.Value("string"),
                        "po": datasets.Value("string"),
                        "p_ip1": datasets.Value("string"),
                        "p_ip2": datasets.Value("string"),
                        "bb": datasets.Value("string"),
                        "ops": datasets.Value("string"),
                        "p_hld": datasets.Value("string"),
                        "bo": datasets.Value("string"),
                        "p_loss": datasets.Value("string"),
                        "e": datasets.Value("string"),
                        "p_game_score": datasets.Value("string"),
                        "p_win": datasets.Value("string"),
                        "a": datasets.Value("string"),
                        "p_era": datasets.Value("string"),
                        "d": datasets.Value("string"),
                        "p_out": datasets.Value("string"),
                        "h": datasets.Value("string"),
                        "p_er": datasets.Value("string"),
                        "p_np": datasets.Value("string"),
                        "hr": datasets.Value("string"),
                        "r": datasets.Value("string"),
                        "so": datasets.Value("string"),
                        "t": datasets.Value("string"),
                        "rbi": datasets.Value("string"),
                        "team": datasets.Value("string"),
                        "sb": datasets.Value("string"),
                        "slg": datasets.Value("string"),
                        "sf": datasets.Value("string"),
                        "obp": datasets.Value("string"),
                    }
                ],
                "home_city": datasets.Value("string"),
                "vis_name": datasets.Value("string"),
                "play_by_play": [
                    {
                        "top": [
                            {
                                "runs": datasets.Value("string"),
                                "scorers": [datasets.Value("string")],
                                "pitcher": datasets.Value("string"),
                                "o": datasets.Value("string"),
                                "b": datasets.Value("string"),
                                "s": datasets.Value("string"),
                                "batter": datasets.Value("string"),
                                "b1": [datasets.Value("string")],
                                "b2": [datasets.Value("string")],
                                "b3": [datasets.Value("string")],
                                "event": datasets.Value("string"),
                                "event2": datasets.Value("string"),
                                "home_team_runs": datasets.Value("string"),
                                "away_team_runs": datasets.Value("string"),
                                "rbi": datasets.Value("string"),
                                "error_runs": datasets.Value("string"),
                                "fielder_error": datasets.Value("string"),
                            }
                        ],
                        "bottom": [
                            {
                                "runs": datasets.Value("string"),
                                "scorers": [datasets.Value("string")],
                                "pitcher": datasets.Value("string"),
                                "o": datasets.Value("string"),
                                "b": datasets.Value("string"),
                                "s": datasets.Value("string"),
                                "batter": datasets.Value("string"),
                                "b1": [datasets.Value("string")],
                                "b2": [datasets.Value("string")],
                                "b3": [datasets.Value("string")],
                                "event": datasets.Value("string"),
                                "event2": datasets.Value("string"),
                                "home_team_runs": datasets.Value("string"),
                                "away_team_runs": datasets.Value("string"),
                                "rbi": datasets.Value("string"),
                                "error_runs": datasets.Value("string"),
                                "fielder_error": datasets.Value("string"),
                            }
                        ],
                        "inning": datasets.Value("string"),
                    }
                ],
                "vis_line": {
                    "innings": [
                        {
                            "inn": datasets.Value("string"),
                            "runs": datasets.Value("string"),
                        }
                    ],
                    "result": datasets.Value("string"),
                    "team_runs": datasets.Value("string"),
                    "team_hits": datasets.Value("string"),
                    "team_errors": datasets.Value("string"),
                    "team_name": datasets.Value("string"),
                    "team_city": datasets.Value("string"),
                },
                "home_line": {
                    "innings": [
                        {
                            "inn": datasets.Value("string"),
                            "runs": datasets.Value("string"),
                        }
                    ],
                    "result": datasets.Value("string"),
                    "team_runs": datasets.Value("string"),
                    "team_hits": datasets.Value("string"),
                    "team_errors": datasets.Value("string"),
                    "team_name": datasets.Value("string"),
                    "team_city": datasets.Value("string"),
                },
                "vis_city": datasets.Value("string"),
                "day": datasets.Value("string"),
                "summary": [datasets.Value("string")],
                "summary_eval": datasets.Value("string"),
                "gem_id": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
                "linearized_input": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager used to download and extract URLs.
        # It accepts any type or nested list/dict and returns the same structure with each URL
        # replaced by a local path; archives are extracted and the cached extraction folder is returned.
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "data", "train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "data", "test.jsonl"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "data", "validation.jsonl"),
                    "split": "validation",
                },
            ),
        ]
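
    # tokenize_initials splits run-together initials so that each one becomes its
    # own token, e.g. "J.D. Martinez" -> "J. D. Martinez".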
    def tokenize_initials(self, value):
        attrib_value = re.sub(r"(\w)\.(\w)\.", r"\g<1>. \g<2>.", value)
        return attrib_value
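
    # get_team_line_attributes renders one team's line score as a flat string such as
    # "<HOME> <TEAM> Cubs <CITY> Chicago <TEAM-RESULT> win <TEAM_RUNS> 5 <TEAM_HITS> 9
    # <TEAM_ERRORS> 1 <INN> 1 0 <INN> 2 2 ..." (the values here are illustrative only).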
    def get_team_line_attributes(self, entry, name):
        if name == entry["home_line"]["team_name"]:
            line = entry["home_line"]
            team_type = "home"
        elif name == entry["vis_line"]["team_name"]:
            line = entry["vis_line"]
            team_type = "vis"
        else:
            raise ValueError("team name %s matches neither home_line nor vis_line" % name)

        city = line["team_city"]
        name = line["team_name"]
        result = line["result"]
        updated_type = "<" + team_type.upper() + ">"
        team_tup = (updated_type, name, city, result)
        team_line = "%s <TEAM> %s <CITY> %s <TEAM-RESULT> %s"
        sentence1 = team_line % team_tup

        other_attributes = []
        attributes = ["team_runs", "team_hits", "team_errors"]
        for attrib in attributes:
            template_string = " ".join([team_verbalization_map[attrib], "%s"])
            other_attributes.append(template_string % line[attrib])
        other_attributes = " ".join(other_attributes)

        team_info = sentence1
        if len(other_attributes) > 0:
            team_info = " ".join([sentence1, other_attributes])

        innings = line["innings"]
        inning_verbalization = []
        for inning in innings:
            inning_phrase = "<INN> %s %s" % (inning["inn"], inning["runs"])
            inning_verbalization.append(inning_phrase)
        inning_sentence = " ".join(inning_verbalization)
        team_info = " ".join([team_info, inning_sentence])
        return team_info
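
    # get_player_line emits one string per box-score player, e.g.
    # "<PLAYER> Mookie Betts <TEAM> Red Sox <POS> RF <RUNS> 1 <HITS> 2 <ATBAT> 4 <AVG> .346"
    # (illustrative values); stats that are "N/A", zero-valued counting stats and
    # a ".000" average are skipped.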
    def get_player_line(self, entry):
        players = []
        for player in entry["box_score"]:
            if player["full_name"] == "N/A":
                continue
            player_line = "<PLAYER> %s <TEAM> %s <POS> %s"
            player_tup = (self.tokenize_initials(player["full_name"]), player["team"], player["pos"])
            player_basic_info = player_line % player_tup
            other_attributes = []
            for attrib in ["r", "h", "hr", "rbi", "e", "ab", "avg", "cs", "hbp", "bb", "sb", "sf", "so", "a", "po",
                           "p_ip1", "p_ip2", "p_w", "p_l", "p_h", "p_r", "p_er", "p_bb", "p_so", "p_hr", "p_np",
                           "p_s", "p_era", "p_win", "p_loss", "p_save", "p_sv", "p_bf", "p_out", "p_bs"]:
                if player[attrib] == "N/A":
                    continue
                if attrib in ["sb", "sf", "e", "po", "a", "cs", "hbp", "hr", "so", "bb", "p_hr", "p_sv",
                              "p_bs"] and int(player[attrib]) == 0:
                    continue
                if attrib in ["avg"] and player[attrib] == ".000":
                    continue
                template_string = " ".join([player_verbalization_map[attrib], "%s"])
                other_attributes.append(template_string % player[attrib])
            player_other_attributes = " ".join(other_attributes)
            if other_attributes:
                player_info = " ".join([player_basic_info, player_other_attributes])
            else:
                player_info = player_basic_info
            players.append(player_info)
        return players
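
    # The helpers below each contribute "<TAG> value" fragments describing a single
    # play-by-play record; get_play_by_play_desc assembles them into one play description.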
    def get_runs_desc(self, inning_play):
        obs_desc = []
        for attrib in ["runs", "rbi", "error_runs"]:
            if attrib in inning_play and inning_play[attrib] != "N/A" and int(inning_play[attrib]) > 0:
                desc = " ".join([pbyp_verbalization_map[attrib], "%d"])
                obs_desc.append(desc % int(inning_play[attrib]))
        return obs_desc

    def get_obs_desc(self, inning_play):
        obs_desc = []
        for attrib in ["o", "b", "s"]:
            if attrib in inning_play:
                desc = " ".join([pbyp_verbalization_map[attrib], "%d"])
                obs_desc.append(desc % int(inning_play[attrib]))
        return obs_desc

    def get_name_desc(self, attrib, inning_play, obs_desc):
        if attrib in inning_play:
            desc = " ".join([pbyp_verbalization_map[attrib], "%s"])
            attrib_value = self.tokenize_initials(inning_play[attrib])
            obs_desc.append(desc % attrib_value)

    def get_name_desc_entity(self, attrib, entity_name, obs_desc):
        desc = " ".join([pbyp_verbalization_map[attrib], "%s"])
        attrib_value = self.tokenize_initials(entity_name)
        obs_desc.append(desc % attrib_value)

    def get_team_scores_desc(self, away, home, inning_play, obs_desc):
        if ("home_team_runs" in inning_play and "away_team_runs" in inning_play
                and inning_play["home_team_runs"] != "N/A" and inning_play["away_team_runs"] != "N/A"):
            desc = "<TEAM-SCORES> %s %d %s %d" % (
                home, int(inning_play["home_team_runs"]), away, int(inning_play["away_team_runs"]))
            obs_desc.append(desc)

    def get_attrib_value_desc(self, attrib, inning_play, obs_desc):
        if attrib in inning_play and inning_play[attrib] != "N/A":
            desc = " ".join([pbyp_verbalization_map[attrib], "%s"])
            obs_desc.append(desc % inning_play[attrib])

    def get_play_by_play_desc(self, home, away, inning, inning_play, play_index, top_bottom):
        inning_line = " ".join(
            ["<INNING> %s", pbyp_verbalization_map[top_bottom], "<BATTING> %s <PITCHING> %s <PLAY> %d"])
        if top_bottom == "top":
            inning_attrib = (inning, away, home, play_index)
        else:
            inning_attrib = (inning, home, away, play_index)
        inning_desc = inning_line % inning_attrib
        other_attrib_desc = [inning_desc]
        other_attrib_desc.extend(self.get_runs_desc(inning_play))
        other_attrib_desc.extend(self.get_obs_desc(inning_play))
        for attrib in ["batter", "pitcher", "fielder_error"]:
            if attrib in inning_play and inning_play[attrib] != "N/A":
                self.get_name_desc(attrib, inning_play, other_attrib_desc)
        for attrib in ["scorers", "b2", "b3"]:
            if attrib in inning_play and len(inning_play[attrib]) > 0 and inning_play[attrib][0] != "N/A":
                for baserunner_instance in inning_play[attrib]:
                    self.get_name_desc_entity(attrib, baserunner_instance, other_attrib_desc)
        self.get_attrib_value_desc("event", inning_play, other_attrib_desc)
        self.get_attrib_value_desc("event2", inning_play, other_attrib_desc)
        self.get_team_scores_desc(away, home, inning_play, other_attrib_desc)
        return other_attrib_desc

    def get_play_by_play_all_entities_inning(self, inning_data, home, away, inning, side):
        play_by_play_desc = []
        play_index = 1
        inning_plays = inning_data[side]
        for inning_play in inning_plays:
            other_attrib_desc = self.get_play_by_play_desc(home, away, inning, inning_play, play_index, side)
            other_attrib_desc = " ".join(other_attrib_desc)
            play_index += 1
            play_by_play_desc.append(other_attrib_desc)
        return play_by_play_desc
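
    # linearize_input flattens one game into a single space-separated string: the home
    # and visiting team lines first, then one entry per box-score player, then the
    # play-by-play of each inning (top half, then bottom half).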
    def linearize_input(self, entry):
        output = []
        output.append(self.get_team_line_attributes(entry, entry["home_line"]["team_name"]))
        output.append(self.get_team_line_attributes(entry, entry["vis_line"]["team_name"]))
        output.extend(self.get_player_line(entry))
        for inning_data in entry["play_by_play"]:
            for side in ["top", "bottom"]:
                pbyp_desc = self.get_play_by_play_all_entities_inning(
                    inning_data, entry["home_line"]["team_name"], entry["vis_line"]["team_name"],
                    inning_data["inning"], side)
                if pbyp_desc:
                    output.append(" ".join(pbyp_desc))
        linearized_input = " ".join(output)
        linearized_input = linearized_input.replace("  ", " ")  # collapse double spaces
        return linearized_input
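
    # Note: each example's "target" and "references" fields are filled from
    # "summary_eval" below, while "summary" keeps the summary as a list of strings.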
    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        # `filepath` and `split` are unpacked from the `gen_kwargs` defined in `_split_generators`.
        # The `key` is only there for legacy (tfds) reasons and is not important in itself.
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, {
                    "home_name": data["home_name"],
                    "box_score": data["box_score"],
                    "home_city": data["home_city"],
                    "vis_name": data["vis_name"],
                    "play_by_play": data["play_by_play"],
                    "vis_line": data["vis_line"],
                    "vis_city": data["vis_city"],
                    "day": data["day"],
                    "home_line": data["home_line"],
                    "summary": data["summary"],
                    "summary_eval": data["summary_eval"],
                    "gem_id": data["gem_id"],
                    "target": data["summary_eval"],
                    "references": [data["summary_eval"]],
                    "linearized_input": self.linearize_input(data),
                }
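

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original script): assumes `data.zip`
    # sits next to this file, as `_URL` above expects, and that the installed `datasets`
    # version still supports loading a dataset from a local script path.
    dataset = datasets.load_dataset(__file__)
    print(dataset["train"][0]["linearized_input"][:300])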