Commit 213288b

Update files from the datasets library (from 1.0.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed:
- .gitattributes +27 -0
- c4.py +331 -0
- c4_utils.py +489 -0
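
c4.py and c4_utils.py are the dataset script and helper module that the `datasets` library runs when the `c4` dataset is requested from the Hub. As a rough usage sketch (not part of this commit; it assumes a working Apache Beam installation and the `beam_runner` argument that `load_dataset` exposes for Beam-based builders in this release), loading the English config defined in c4.py could look like:

    import datasets

    # Sketch only: preparing C4 is extremely resource-intensive. The script's own
    # description recommends a distributed runner such as Cloud Dataflow rather
    # than the local DirectRunner used here.
    c4 = datasets.load_dataset(
        "c4",                        # resolves to the c4.py script in this repository
        "en",                        # one of the configs built by C4Config below
        beam_runner="DirectRunner",  # assumption: local Apache Beam runner
    )
    print(c4["train"][0]["url"])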
    	
.gitattributes ADDED

@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
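
Every pattern in .gitattributes routes matching files (binary model formats, archives, Arrow/Parquet data, TensorBoard event files) through Git LFS, so only plain-text files such as the two Python scripts below are stored directly in the repository. A small illustrative sketch (not part of the commit; `fnmatch` shell-style globbing is only an approximation of gitattributes pattern matching) of which filenames these patterns would capture:

    import fnmatch

    # Assumed subset of the LFS patterns listed above.
    lfs_patterns = ["*.7z", "*.arrow", "*.bin", "*.gz", "*.h5", "*.parquet", "*.zip", "*tfevents*"]

    def routed_through_lfs(filename):
        # Shell-style matching as a rough stand-in for gitattributes rules.
        return any(fnmatch.fnmatch(filename, pattern) for pattern in lfs_patterns)

    for name in ["model.h5", "data.parquet", "c4.py", "run1.tfevents.12345"]:
        print(name, routed_through_lfs(name))  # c4.py is the only one kept out of LFS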
    	
c4.py ADDED

@@ -0,0 +1,331 @@
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""C4 dataset based on Common Crawl."""
+
+from __future__ import absolute_import, division, print_function
+
+import json
+import logging
+import os
+
+import datasets
+
+from .c4_utils import (
+    dedupe_urls,
+    filter_by_webtextlike,
+    get_clean_page_fn,
+    get_counter_inc_fn,
+    get_hashed_url_filter_fn,
+    is_language,
+    is_realnews_domain,
+    is_valid_length,
+    normalize_url,
+    remove_duplicate_text,
+    split_wet_file,
+)
+
+
+_DESCRIPTION = """\
+A colossal, cleaned version of Common Crawl's web crawl corpus.
+
+Based on Common Crawl dataset: "https://commoncrawl.org"
+
+Due to the overhead of cleaning the dataset, it is recommend you prepare it with
+a distributed service like Cloud Dataflow. More info at
+https://www.tensorflow.org/datasets/beam_datasets.
+"""
+_CITATION = """
+@article{2019t5,
+    author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
+    title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
+    journal = {arXiv e-prints},
+    year = {2019},
+    archivePrefix = {arXiv},
+    eprint = {1910.10683},
+}
+"""
+_VERSION = datasets.Version("2.3.0", "Deduplicate lines within a page.")
+
+_DOWNLOAD_HOST = "https://commoncrawl.s3.amazonaws.com"
+_WET_PATH_URL = "https://commoncrawl.s3.amazonaws.com/crawl-data/CC-MAIN-{cc_version}/wet.paths.gz"
+_REALNEWS_DOMAINS_URL = "https://raw.githubusercontent.com/rowanz/grover/38f7184bd87237ae2d3bc330b99f1e2e246f6d51/realnews/domain_to_allowed_subdomains.json"
+_BADWORDS_URL = "https://raw.githubusercontent.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words/25e679f03d96baa721cde20db9944649e8d0a844/{lang}"
+_CHECKSUMS_URL = "https://storage.googleapis.com/tfds-data/manual_checksums/c4.txt"
+_OPENWEBTEXT_URLS_ZIP = "OpenWebText.zip"
+_OPENWEBTEXT_URLS_URL = "https://mega.nz/#F!EZZD0YwJ!9_PlEQzdMVLaNdKv_ICNVQ"
+_OPENWEBTEXT_URLS_FILE_PATTERN = "OpenWebText/Version 1/URLs/*.txt"
+
+_DEFAULT_CC_VERSIONS = ("2019-18",)  # April 2019
+_DEFAULT_WEBTEXTLIKE_CC_VERSIONS = (  # August 2018 - July 2019
+    "2018-34",
+    "2018-39",
+    "2018-43",
+    "2018-47",
+    "2018-51",
+    "2019-04",
+    "2019-09",
+    "2019-13",
+    "2019-18",
+    "2019-22",
+    "2019-26",
+    "2019-30",
+)
+
+
+class C4Config(datasets.BuilderConfig):
+    """BuilderConfig for C4 dataset."""
+
+    def __init__(self, language, cc_versions=None, clean=True, realnewslike=False, webtextlike=False, **kwargs):
+        """BuilderConfig for C4.
+
+        Args:
+            language: string, the language code, or "all" to disable language
+                filtering.
+            cc_versions: tuple(string), a collection of versions of Common Crawl to
+                use as the raw source text. Set to None to use defaults.
+            clean: bool, whether to clean the dataset for badwords, duplications, etc.
+            realnewslike: bool, whether to limit to news domains as compiled by
+                RealNews.
+            webtextlike: bool, whether to limit to WebText-like URLs.
+            **kwargs: keyword arguments forwarded to super.
+        """
+        name_parts = [language]
+        if cc_versions:
+            name_parts.append("_".join(cc_versions))
+        if not clean:
+            name_parts.append("noclean")
+        if realnewslike:
+            name_parts.append("realnewslike")
+        if webtextlike:
+            name_parts.append("webtextlike")
+        name = ".".join(name_parts)
+        super(C4Config, self).__init__(name=name, version=_VERSION, **kwargs)
+        self.lang = language
+        self.cc_versions = cc_versions or (_DEFAULT_WEBTEXTLIKE_CC_VERSIONS if webtextlike else _DEFAULT_CC_VERSIONS)
+        self.clean = clean
+        self.realnewslike = realnewslike
+        self.webtextlike = webtextlike
+
+
+class C4(datasets.BeamBasedBuilder):
+    """C4 dataset based on Common Crawl."""
+
+    BUILDER_CONFIGS = [
+        C4Config(language="en", description="English C4 dataset."),
+        C4Config(
+            language="en",
+            clean=False,
+            description="Disables all cleaning (deduplication, removal based on bad words, " "etc.)",
+        ),
+        C4Config(
+            language="en",
+            realnewslike=True,
+            description="Filters from the default config to only include content from the "
+            "domains used in the 'RealNews' dataset (Zellers et al., 2019).",
+        ),
+        C4Config(
+            language="en",
+            webtextlike=True,
+            description="Filters from the default config to only include content from the "
+            "URLs in OpenWebText (https://github.com/jcpeterson/openwebtext).",
+        ),
+    ]
+
+    def manual_download_instructions(self):
+        return """\
+    For the WebText-like config, you must manually download 'OpenWebText.zip'
+    (from https://mega.nz/#F!EZZD0YwJ!9_PlEQzdMVLaNdKv_ICNVQ) and the Common Crawl
+    WET files from August 2018 to July 2019
+    (https://commoncrawl.org/the-data/get-started/) and place them in the
+    `data_dir`.
+        """
+
+    def _info(self):
+        features = {
+            "text": datasets.Value("string"),
+            "url": datasets.Value("string"),
+            "content-type": datasets.Value("string"),
+            "content-length": datasets.Value("string"),
+            "timestamp": datasets.Value("string"),
+        }
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(features),
+            citation=_CITATION,
+            homepage="https://github.com/google-research/text-to-text-transfer-transformer#datasets",
+        )
+
+    def _split_generators(self, dl_manager, pipeline):
+        import apache_beam as beam
+
+        # We will automatically download the default CC version(s), but others need to
+        # be manually downloaded.
+        cc_versions = set(self.config.cc_versions)
+        auto_cc_versions = cc_versions & set(_DEFAULT_CC_VERSIONS)
+        manual_cc_versions = cc_versions - set(_DEFAULT_CC_VERSIONS)
+
+        files_to_download = {}
+        files_to_download["wet_path_urls"] = [
+            _WET_PATH_URL.format(cc_version=cc_version) for cc_version in auto_cc_versions
+        ]
+        if self.config.clean:
+            files_to_download["badwords"] = _BADWORDS_URL.format(lang=self.config.lang)
+        if self.config.realnewslike:
+            files_to_download["realnews_domains"] = _REALNEWS_DOMAINS_URL
+        file_paths = dl_manager.download_and_extract(files_to_download)
+
+        if self.config.webtextlike:
+            owt_path = os.path.join(dl_manager.manual_dir, _OPENWEBTEXT_URLS_ZIP)
+            if not os.path.exists(owt_path):
+                raise FileNotFoundError(
+                    "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('c4', data_dir=...)` that includes a file name {}. Manual download instructions: {})".format(
+                        owt_path, _OPENWEBTEXT_URLS_ZIP, self.manual_download_instructions
+                    )
+                )
+            file_paths["openwebtext_urls_zip"] = dl_manager.extract(owt_path)
+
+        wet_urls = []
+        for wet_path_url in file_paths["wet_path_urls"]:
+            with open(wet_path_url, "r", encoding="utf-8") as f:
+                wet_urls.extend(["%s/%s" % (_DOWNLOAD_HOST, l.strip()) for l in f])
+        file_paths["wet_urls"] = wet_urls
+        file_paths["wet_files"] = []
+
+        for cc_version in manual_cc_versions:
+            cc_dir = os.path.join(dl_manager.manual_dir, cc_version)
+            wet_files = beam.io.filesystems.FileSystems.match(os.path.join(cc_dir, "*.warc.wet.gz"))
+            if not os.path.exists(cc_dir):
+                raise FileNotFoundError(
+                    "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('c4', data_dir=...)` that includes the files {}. Manual download instructions: {})".format(
+                        cc_dir, "*.warc.wet.gz", self.manual_download_instructions
+                    )
+                )
+            logging.info("Adding %d WET files for manually downloaded version %s.", len(wet_files), cc_version)
+            file_paths["wet_files"].extend(wet_files)
+
+        page_content_pcollection = self._get_page_content(pipeline, file_paths, dl_manager)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs=dict(
+                    split="train",
+                    page_content=page_content_pcollection,
+                    hashed_url_predicate=lambda x: x % 1000 != 0,  # 99.9%
+                ),
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs=dict(
+                    split="validation",
+                    page_content=page_content_pcollection,
+                    hashed_url_predicate=lambda x: x % 1000 == 0,  # 0.1%
+                ),
+            ),
+        ]
+
+    def _get_page_content(self, pipeline, file_paths, dl_manager):
+        """Build PCollection of un-split page content."""
+        import apache_beam as beam
+
+        wet_file_paths = pipeline | "create_wet_files" >> beam.Create(file_paths["wet_files"])
+        if "wet_urls" in file_paths:
+
+            def download_url(url, downloader, pipeline):
+                path = downloader.download(url)
+                if not pipeline.is_local():
+                    path = downloader.ship_files_with_pipeline(path, pipeline)
+                return path
+
+            dl_wet_file_paths = (
+                pipeline
+                | "create_wet_urls" >> beam.Create(file_paths["wet_urls"])
+                | beam.Map(download_url, downloader=dl_manager, pipeline=pipeline)
+            )
+            wet_file_paths = (wet_file_paths, dl_wet_file_paths) | beam.Flatten()
+
+        # Parse WET files and filter by length.
+        # Output: url, text
+        page_content = wet_file_paths | beam.FlatMap(split_wet_file) | beam.Filter(is_valid_length)
+
+        # Optionally filter for RealNews domains.
+        # Output: url, text
+        if self.config.realnewslike:
+            with open(file_paths["realnews_domains"], "r", encoding="utf-8") as f:
+                realnews_domains = json.load(f)
+            page_content = page_content | beam.Filter(is_realnews_domain, realnews_domains)
+
+        # Normalize and deduplicate by URL.
+        # Output: url, text
+        page_content = (
+            page_content
+            | "normalize_url" >> beam.Map(normalize_url)
+            | "group_url" >> beam.GroupByKey()
+            | beam.Map(dedupe_urls)
+        )
+
+        # Optionally filter for WebText-like URLs.
+        # Output: url, text
+        if self.config.webtextlike:
+            webtextlike_urls = (
+                pipeline
+                | "read_webtextlike_urls"
+                >> beam.io.ReadFromText(
+                    os.path.join(file_paths["openwebtext_urls_zip"], _OPENWEBTEXT_URLS_FILE_PATTERN)
+                )
+                | "add_dummy_page" >> beam.Map(lambda x: (x, ""))
+                | "normal_webtext_url" >> beam.Map(normalize_url)
+            )
+            page_content = (
+                {"text": page_content, "webtextlike_urls": webtextlike_urls}
+                | "group_webtextlike_urls" >> beam.CoGroupByKey()
+                | beam.FlatMap(filter_by_webtextlike)
+            )
+
+        # Optionally clean pages of badwords, boilerplate text, and duplicate
+        # spans of sentences.
+        # Output: url, text
+        if self.config.clean:
+            with open(file_paths["badwords"], "r", encoding="utf-8") as f:
+                badwords = [l.strip() for l in f]
+            page_content = page_content | "clean_pages" >> beam.FlatMap(get_clean_page_fn(badwords))
+            page_content = remove_duplicate_text(page_content)
+
+        # Optionally filter out non-`language` pages. We do this after cleaning
+        # since it may change the predominant language.
+        if self.config.lang != "all":
+            page_content |= beam.Filter(is_language, language=self.config.lang)
+
+        return page_content
+
+    def _build_pcollection(self, unused_pipeline, split, page_content, hashed_url_predicate):
+        import apache_beam as beam
+
+        def _emit_examples(el):
+            get_counter_inc_fn(split)("examples")
+            _, features = el
+            return (
+                features["url"],
+                {
+                    "url": features["url"],
+                    "text": features["text"],
+                    "content-type": features["content-type"],
+                    "content-length": features["content-length"],
+                    "timestamp": features["timestamp"],
+                },
+            )
+
+        return page_content | beam.Filter(get_hashed_url_filter_fn(hashed_url_predicate)) | beam.Map(_emit_examples)
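
To make the naming and split logic in c4.py concrete, the following standalone sketch re-implements two small pieces of the script for illustration (it is not the script's own API and is not part of the commit): the config-name construction from `C4Config.__init__` and the hashed-URL predicate that `_split_generators` uses to carve out the validation split.

    import hashlib

    def config_name(language, cc_versions=None, clean=True, realnewslike=False, webtextlike=False):
        # Mirrors the name construction in C4Config.__init__.
        parts = [language]
        if cc_versions:
            parts.append("_".join(cc_versions))
        if not clean:
            parts.append("noclean")
        if realnewslike:
            parts.append("realnewslike")
        if webtextlike:
            parts.append("webtextlike")
        return ".".join(parts)

    print(config_name("en"))                     # en
    print(config_name("en", clean=False))        # en.noclean
    print(config_name("en", realnewslike=True))  # en.realnewslike
    print(config_name("en", webtextlike=True))   # en.webtextlike

    def is_validation_url(url):
        # Mirrors get_hashed_url_filter_fn combined with the `x % 1000 == 0`
        # predicate: roughly 1 URL in 1000 (0.1%) lands in the validation split.
        val = int(hashlib.md5(url.encode("utf-8")).hexdigest(), 16)
        return val % 1000 == 0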
    	
        c4_utils.py
    ADDED
    
    | @@ -0,0 +1,489 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            # coding=utf-8
         | 
| 2 | 
            +
            # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
         | 
| 3 | 
            +
            #
         | 
| 4 | 
            +
            # Licensed under the Apache License, Version 2.0 (the "License");
         | 
| 5 | 
            +
            # you may not use this file except in compliance with the License.
         | 
| 6 | 
            +
            # You may obtain a copy of the License at
         | 
| 7 | 
            +
            #
         | 
| 8 | 
            +
            #     http://www.apache.org/licenses/LICENSE-2.0
         | 
| 9 | 
            +
            #
         | 
| 10 | 
            +
            # Unless required by applicable law or agreed to in writing, software
         | 
| 11 | 
            +
            # distributed under the License is distributed on an "AS IS" BASIS,
         | 
| 12 | 
            +
            # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
         | 
| 13 | 
            +
            # See the License for the specific language governing permissions and
         | 
| 14 | 
            +
            # limitations under the License.
         | 
| 15 | 
            +
             | 
| 16 | 
            +
            # Lint as: python3
         | 
| 17 | 
            +
            """Utilities for generating the C4 dataset."""
         | 
| 18 | 
            +
             | 
| 19 | 
            +
            from __future__ import absolute_import, division, print_function
         | 
| 20 | 
            +
             | 
| 21 | 
            +
            import functools
         | 
| 22 | 
            +
            import gzip
         | 
| 23 | 
            +
            import hashlib
         | 
| 24 | 
            +
            import io
         | 
| 25 | 
            +
            import re
         | 
| 26 | 
            +
            import threading
         | 
| 27 | 
            +
             | 
| 28 | 
            +
             | 
| 29 | 
            +
            # WET file constants
         | 
| 30 | 
            +
            _PAGE_DELIMITER = "WARC/1.0"
         | 
| 31 | 
            +
            _URL_KEY = "WARC-Target-URI:"
         | 
| 32 | 
            +
            _URL_DATE = "WARC-Date:"
         | 
| 33 | 
            +
            _CONTENT_TYPE = "Content-Type:"
         | 
| 34 | 
            +
            _CONTENT_LEN = "Content-Length:"
         | 
| 35 | 
            +
            _METADATA_PREFIXES = ("WARC", "CONTENT-", "Content-")
         | 
| 36 | 
            +
             | 
| 37 | 
            +
            # Filters
         | 
| 38 | 
            +
            _MIN_WORDS_PER_LINE = 5
         | 
| 39 | 
            +
            _MIN_NUM_SENTENCES = 3
         | 
| 40 | 
            +
            _MAX_WORD_LENGTH = 1000
         | 
| 41 | 
            +
            _END_MARKS = (".", "?", "!", '"')
         | 
| 42 | 
            +
            _ELLIPSIS = "..."
         | 
| 43 | 
            +
            _POLICY_SUBSTRINGS = [
         | 
| 44 | 
            +
                "terms of use",
         | 
| 45 | 
            +
                "privacy policy",
         | 
| 46 | 
            +
                "cookie policy",
         | 
| 47 | 
            +
                "uses cookies",
         | 
| 48 | 
            +
                "use of cookies",
         | 
| 49 | 
            +
                "use cookies",
         | 
| 50 | 
            +
            ]
         | 
| 51 | 
            +
             | 
| 52 | 
            +
            # Memoized sentence tokenizer.
         | 
| 53 | 
            +
            _SENTENCE_TOKENIZER = None
         | 
| 54 | 
            +
             | 
| 55 | 
            +
             | 
| 56 | 
            +
            def get_counter_inc_fn(namespace):
         | 
| 57 | 
            +
                import apache_beam as beam
         | 
| 58 | 
            +
             | 
| 59 | 
            +
                def counter_inc_fn(counter, amt=1):
         | 
| 60 | 
            +
                    beam.metrics.Metrics.counter(namespace, counter).inc(amt)
         | 
| 61 | 
            +
             | 
| 62 | 
            +
                return counter_inc_fn
         | 
| 63 | 
            +
             | 
| 64 | 
            +
             | 
| 65 | 
            +
            def get_hashed_url_filter_fn(predicate_fn):
         | 
| 66 | 
            +
                import tensorflow.compat.v2 as tf
         | 
| 67 | 
            +
             | 
| 68 | 
            +
                def filter_fn(el):
         | 
| 69 | 
            +
                    url, _ = el
         | 
| 70 | 
            +
                    val = int(hashlib.md5(tf.compat.as_text(url).encode("utf-8")).hexdigest(), 16)
         | 
| 71 | 
            +
                    return predicate_fn(val)
         | 
| 72 | 
            +
             | 
| 73 | 
            +
                return filter_fn
         | 
| 74 | 
            +
             | 
| 75 | 
            +
             | 
| 76 | 
            +
            def _load_sentence_tokenizer():
         | 
| 77 | 
            +
                """Returns a sentence tokenization function."""
         | 
| 78 | 
            +
                # Lock to avoid a race-condition in the creation of the download directory.
         | 
| 79 | 
            +
                with threading.Lock():
         | 
| 80 | 
            +
                    import nltk
         | 
| 81 | 
            +
             | 
| 82 | 
            +
                    nltk.download("punkt")
         | 
| 83 | 
            +
                    return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
         | 
| 84 | 
            +
             | 
| 85 | 
            +
             | 
| 86 | 
            +
            def _get_sentences(text):
         | 
| 87 | 
            +
                import tensorflow.compat.v2 as tf
         | 
| 88 | 
            +
             | 
| 89 | 
            +
                global _SENTENCE_TOKENIZER
         | 
| 90 | 
            +
                if not _SENTENCE_TOKENIZER:
         | 
| 91 | 
            +
                    _SENTENCE_TOKENIZER = _load_sentence_tokenizer()
         | 
| 92 | 
            +
                return list(_SENTENCE_TOKENIZER.tokenize(tf.compat.as_text(text)))
         | 
| 93 | 
            +
             | 
| 94 | 
            +
             | 
| 95 | 
            +
            def _get_sentences_by_line(text, lower=False):
         | 
| 96 | 
            +
                sentences = []
         | 
| 97 | 
            +
                for line in text.splitlines():
         | 
| 98 | 
            +
                    sentences.append([s.lower() if lower else s for s in _get_sentences(line)])
         | 
| 99 | 
            +
                return sentences
         | 
| 100 | 
            +
             | 
| 101 | 
            +
             | 
| 102 | 
            +
            def is_language(page, language, min_probability=0.99):
         | 
| 103 | 
            +
                """Returns True iff text is in `language` with at least `min_probability`."""
         | 
| 104 | 
            +
                unused_url, features = page
         | 
| 105 | 
            +
                text = features["text"]
         | 
| 106 | 
            +
             | 
| 107 | 
            +
                counter_inc_fn = get_counter_inc_fn("detected-lang")
         | 
| 108 | 
            +
             | 
| 109 | 
            +
                # Make langdetect predictions deterministic.
         | 
| 110 | 
            +
                import langdetect
         | 
| 111 | 
            +
             | 
| 112 | 
            +
                langdetect.DetectorFactory.seed = 0
         | 
| 113 | 
            +
                try:
         | 
| 114 | 
            +
                    predictions = langdetect.detect_langs(text)
         | 
| 115 | 
            +
                except langdetect.lang_detect_exception.LangDetectException:
         | 
| 116 | 
            +
                    counter_inc_fn("langdetect-exception")
         | 
| 117 | 
            +
                    return False
         | 
| 118 | 
            +
                if not predictions:
         | 
| 119 | 
            +
                    counter_inc_fn("page-filtered-nolangpredictions")
         | 
| 120 | 
            +
                    return False
         | 
| 121 | 
            +
                best_prediction = predictions[0]
         | 
| 122 | 
            +
                if best_prediction.prob < min_probability:
         | 
| 123 | 
            +
                    counter_inc_fn("page-filtered-lowlangdetectconf")
         | 
| 124 | 
            +
                    return False
         | 
| 125 | 
            +
                if best_prediction.lang != language:
         | 
| 126 | 
            +
                    counter_inc_fn("page-filtered-ignoredlang")
         | 
| 127 | 
            +
                    counter_inc_fn("page-filtered-ignoredlang-%s" % (best_prediction.lang))
         | 
| 128 | 
            +
                    return False
         | 
| 129 | 
            +
                counter_inc_fn("page-emited-%s" % best_prediction.lang)
         | 
| 130 | 
            +
                return True
         | 
| 131 | 
            +
             | 
| 132 | 
            +
             | 
| 133 | 
            +
            def get_clean_page_fn(badwords=None):
         | 
| 134 | 
            +
                """Returns `clean_page` with pre-compiled badword and citation regexes."""
         | 
| 135 | 
            +
                # Used to filter citation from Wikipedia pages (among others).
         | 
| 136 | 
            +
                citation_regex = re.compile(r"\[\d*\]|\[edit\]|\[citation needed\]")
         | 
| 137 | 
            +
                if badwords:
         | 
| 138 | 
            +
                    badwords_regex = re.compile("[^a-z]({})[^a-z]".format("|".join(badwords or [])))
         | 
| 139 | 
            +
                else:
         | 
| 140 | 
            +
                    badwords_regex = None
         | 
| 141 | 
            +
                return functools.partial(clean_page, citation_regex=citation_regex, badwords_regex=badwords_regex)
         | 
| 142 | 
            +
             | 
| 143 | 
            +
             | 
| 144 | 
            +
            def clean_page(
         | 
| 145 | 
            +
                url_and_features,
         | 
| 146 | 
            +
                citation_regex,
         | 
| 147 | 
            +
                badwords_regex=None,
         | 
| 148 | 
            +
                counter_inc_fn=None,
         | 
| 149 | 
            +
                min_words_per_line=_MIN_WORDS_PER_LINE,
         | 
| 150 | 
            +
                min_num_sentences=_MIN_NUM_SENTENCES,
         | 
| 151 | 
            +
                max_word_length=_MAX_WORD_LENGTH,
         | 
| 152 | 
            +
            ):
         | 
| 153 | 
            +
                """Cleans a CommonCrawl page, yielding nothing if it should be skipped.
         | 
| 154 | 
            +
             | 
| 155 | 
            +
                Cleaning removes lines with no end marks or with too few words. After line
         | 
| 156 | 
            +
                filtering, pages are filtered out if they have too few sentences based on a
         | 
| 157 | 
            +
                simple count of end marks.
         | 
| 158 | 
            +
             | 
| 159 | 
            +
                Args:
         | 
| 160 | 
            +
                    url_and_features: tuple(string, dict), the url and features of the page.
         | 
| 161 | 
            +
                    citation_regex: Regex to use for finding Wikipedia-like citations to filter.
         | 
| 162 | 
            +
                    badwords_regex: Regex to use for finding badwords. Default None, which means
         | 
| 163 | 
            +
                        don't apply badwords filtering.
         | 
| 164 | 
            +
                    counter_inc_fn: function, a function taking the name of a counter to be
         | 
| 165 | 
            +
                        incremented and the (optional) amount. Defaults to a beam Metric counter.
         | 
| 166 | 
            +
                    min_words_per_line: int, the minimum number of words a line needs to not be
         | 
| 167 | 
            +
                        removed.
         | 
| 168 | 
            +
                    min_num_sentences: int, the minimum number of sentences a page needs to not
         | 
| 169 | 
            +
                        be skipped.
         | 
| 170 | 
            +
                    max_word_length: int, the maximum number of characters allowed in a word.
         | 
| 171 | 
            +
                        Lines containing a word with too many characters are removed.
         | 
| 172 | 
            +
                Yields:
         | 
| 173 | 
            +
                    The url and cleaned text for the page.
         | 
| 174 | 
            +
                """
         | 
| 175 | 
            +
                url, features = url_and_features
         | 
| 176 | 
            +
                text = features["text"]
         | 
| 177 | 
            +
             | 
| 178 | 
            +
                if not counter_inc_fn:
         | 
| 179 | 
            +
                    counter_inc_fn = get_counter_inc_fn("clean-page")
         | 
| 180 | 
            +
             | 
| 181 | 
            +
                lines = text.splitlines()
         | 
| 182 | 
            +
                valid_lines = []
         | 
| 183 | 
            +
                num_sentences = 0
         | 
| 184 | 
            +
             | 
| 185 | 
            +
                def line_has_too_long_word(line):
         | 
| 186 | 
            +
                    for word in line.split():
         | 
| 187 | 
            +
                        if len(word) > max_word_length:
         | 
| 188 | 
            +
                            return True
         | 
| 189 | 
            +
                    return False
         | 
| 190 | 
            +
             | 
| 191 | 
            +
                for line in lines:
         | 
| 192 | 
            +
                    line = line.strip()
         | 
| 193 | 
            +
                    if line_has_too_long_word(line):
         | 
| 194 | 
            +
                        counter_inc_fn("lines-with-too-long-word")
         | 
| 195 | 
            +
                        continue
         | 
| 196 | 
            +
                    line = citation_regex.sub("", line)
         | 
| 197 | 
            +
                    if not line.endswith(_END_MARKS) or line.endswith(_ELLIPSIS):
         | 
| 198 | 
            +
                        counter_inc_fn("lines-no-endmark")
         | 
| 199 | 
            +
                        continue
         | 
| 200 | 
            +
                    if len(line.split()) < min_words_per_line:
         | 
| 201 | 
            +
                        counter_inc_fn("lines-too-short")
         | 
| 202 | 
            +
                        continue
         | 
| 203 | 
            +
                    line_lower = line.lower()
         | 
| 204 | 
            +
                    # Remove documents which contain lorem ipsum
         | 
| 205 | 
            +
                    if "lorem ipsum" in line_lower:
         | 
| 206 | 
            +
                        counter_inc_fn("filtered-page-loremipsum")
         | 
| 207 | 
            +
                        return
         | 
| 208 | 
            +
                    # Remove "javascript must be enabled" notices
         | 
| 209 | 
            +
                    if "javascript" in line_lower:
         | 
| 210 | 
            +
                        counter_inc_fn("lines-javascript")
         | 
| 211 | 
            +
                        continue
         | 
| 212 | 
            +
                    # Remove docs which probably contain javascript code
         | 
| 213 | 
            +
                    if "{" in line:
         | 
| 214 | 
            +
                        counter_inc_fn("filtered-page-squigglybracket")
         | 
| 215 | 
            +
                        return
         | 
| 216 | 
            +
                    # Remove policy lines
         | 
| 217 | 
            +
                    if any(p in line_lower for p in _POLICY_SUBSTRINGS):
         | 
| 218 | 
            +
                        counter_inc_fn("lines-policy")
         | 
| 219 | 
            +
                        continue
         | 
| 220 | 
            +
                    # If any badword appears on its own in the line, skip this doc
         | 
| 221 | 
            +
                    if badwords_regex:
         | 
| 222 | 
            +
                        badwords_found = badwords_regex.search(line_lower)
         | 
| 223 | 
            +
                        if badwords_found is not None:
         | 
| 224 | 
            +
                            counter_inc_fn("filtered-page-badword")
         | 
| 225 | 
            +
                            return
         | 
| 226 | 
            +
                    num_sentences += len(_get_sentences(line))
         | 
| 227 | 
            +
                    valid_lines.append(line)
         | 
| 228 | 
            +
                    counter_inc_fn("lines-valid")
         | 
| 229 | 
            +
             | 
| 230 | 
            +
                if num_sentences < min_num_sentences:
         | 
| 231 | 
            +
                    counter_inc_fn("filtered-page-toofewsentences")
         | 
| 232 | 
            +
                    return
         | 
| 233 | 
            +
                counter_inc_fn("emitted-clean-pages")
         | 
| 234 | 
            +
                features["text"] = "\n".join(valid_lines).strip()
         | 
| 235 | 
            +
                yield url, features
         | 
| 236 | 
            +
             | 
| 237 | 
            +
             | 
def _hash_line(line):
    import tensorflow.compat.v2 as tf

    m = hashlib.md5()
    m.update(tf.compat.as_text(line).encode("utf-8").strip().lower())
    return m.hexdigest()


def _emit_url_to_lines(page):
    """Emits url to all (lower-cased, hashed) lines."""
    url, features = page
    text = features["text"]
    for line in text.split("\n"):
        yield _hash_line(line), url


def _emit_line_to_urls(el, counter_inc_fn):
    """Emits (hashed) line to all but one url."""
    import tensorflow.compat.v2 as tf

    line, urls = el
    # Materialize urls as a list.
    urls = list(urls)
    # Hash urls and sort to have a consistent, but unbiased, selection when the
    # same urls exist for multiple lines.
    skip_url = min(urls, key=lambda x: hashlib.md5(tf.compat.as_text(x).encode("utf-8")).hexdigest())
    for url in urls:
        if url != skip_url:
            yield url, line
    counter_inc_fn("emitted-line-duplicate", amt=len(urls) - 1)
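
Taken together, `_hash_line`, `_emit_url_to_lines`, and `_emit_line_to_urls` implement a keep-one-copy policy: a duplicated line survives only on the URL with the smallest hashed URL and is marked for removal everywhere else. A dependency-free sketch of the same idea (plain `hashlib` stands in for the tf-based `_hash_line`, and the two sample pages are invented):

# Minimal sketch, assuming plain strings instead of the pipeline's elements.
import collections
import hashlib

def _toy_hash(s):
    return hashlib.md5(s.strip().lower().encode("utf-8")).hexdigest()

pages = {
    "a.com/1": "Shared footer line\nUnique text A",
    "b.com/2": "Shared footer line\nUnique text B",
}

# _emit_url_to_lines: hashed line -> urls it appears on.
line_to_urls = collections.defaultdict(list)
for url, text in pages.items():
    for line in text.split("\n"):
        line_to_urls[_toy_hash(line)].append(url)

# _emit_line_to_urls: keep the line on the url with the smallest hashed url,
# emit (url, hashed_line) removal records for every other url.
removals = collections.defaultdict(set)
for hashed_line, urls in line_to_urls.items():
    keep = min(urls, key=lambda u: hashlib.md5(u.encode("utf-8")).hexdigest())
    for url in urls:
        if url != keep:
            removals[url].add(hashed_line)

print({u: len(r) for u, r in removals.items()})  # exactly one of the two pages loses the shared line
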
def _remove_lines_from_text(el, counter_inc_fn, min_num_sentences=_MIN_NUM_SENTENCES):
    """Removes matching lines from the page.

    Processes the result of a join containing a single value for 'features' and
    zero or more values for 'lines'. Each value in 'lines' is a lower-cased,
    hashed line to remove; every line of the page is hashed and compared in
    full against that set.

    Args:
        el: `(string, {'features': features_dict, 'lines': [string]})`,
            element containing the result of a join on key with both the page
            text and lower-cased, hashed lines to remove.
        counter_inc_fn: function, a function taking the name of a counter to be
            incremented and the (optional) amount.
        min_num_sentences: int, the minimum number of sentences a page needs to
            not be skipped.

    Yields:
        url: The URL of the page.
        features: The page features with lines removed from text.
    """
    url, join_values = el
    features = join_values["features"]

    assert len(features) == 1, "Invalid page count (%d) for %s" % (len(features), url)
    features = features[0]
    text = features["text"]
    lines_to_remove = set(join_values["lines"])
    new_lines = []
    hashed_lines = set()
    for line in text.split("\n"):
        hashed_line = _hash_line(line)
        if hashed_line in lines_to_remove:
            counter_inc_fn("filtered-lines-duplicate")
        elif hashed_line not in hashed_lines:
            new_lines.append(line)
            hashed_lines.add(hashed_line)
    new_text = "\n".join(new_lines)
    if len(_get_sentences(new_text)) < min_num_sentences:
        counter_inc_fn("filtered-doc-toofewsentences")
        return
    new_features = features.copy()
    new_features["text"] = new_text
    yield (url, new_features)


def remove_duplicate_text(pages):
    """Utility to remove duplicate lines across text documents."""
    # Output: url, lines
    import apache_beam as beam

    counter_inc_fn = get_counter_inc_fn("dedupe-lines")
    lines_to_remove = (
        pages
        | beam.FlatMap(_emit_url_to_lines)
        | "group_sentences" >> beam.GroupByKey()
        | beam.FlatMap(_emit_line_to_urls, counter_inc_fn=counter_inc_fn)
    )

    # Output: url, text
    final_docs = (
        {"features": pages, "lines": lines_to_remove}
        | "group_features_and_lines_by_url" >> beam.CoGroupByKey()
        | beam.FlatMap(_remove_lines_from_text, counter_inc_fn=counter_inc_fn)
    )

    return final_docs
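
`remove_duplicate_text` is meant to be applied to a Beam PCollection of `(url, features)` pairs. A minimal usage sketch with the DirectRunner could look as follows; the toy pages, stage labels, and the assumption that this file is importable as `c4_utils` are illustrative rather than part of the module.

# Hedged usage sketch: toy in-memory pages instead of real Common Crawl data.
# Each toy page is padded with enough sentences to survive the module's
# minimum-sentence check after one line is removed.
import apache_beam as beam
from c4_utils import remove_duplicate_text  # assumes this file is on the Python path

toy_pages = [
    ("a.com/1", {"text": "Shared boilerplate line.\nA one.\nA two.\nA three.\nA four.\nA five."}),
    ("b.com/2", {"text": "Shared boilerplate line.\nB one.\nB two.\nB three.\nB four.\nB five."}),
]

with beam.Pipeline() as p:
    pages = p | "create_toy_pages" >> beam.Create(toy_pages)
    deduped = remove_duplicate_text(pages)
    deduped | "show" >> beam.Map(print)  # exactly one page keeps the shared line
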
def split_wet_file(wet_file_path, counter_inc_fn=None):
    """Split a WET file into separate pages."""
    from absl import logging

    logging.info("Splitting file: %s", wet_file_path)
    if not counter_inc_fn:
        counter_inc_fn = get_counter_inc_fn("split-wet-file")
    counter_inc_fn("wet-file")

    import apache_beam as beam

    with beam.io.filesystems.FileSystems.open(wet_file_path) as f, gzip.GzipFile(fileobj=f) as g:
        url = None
        content = None
        content_len = None
        content_type = None
        timestamp = None

        def _maybe_get_page():
            """Generate a (url, {features}) page."""
            if not url and url is not None:
                counter_inc_fn("page-filtered-nourl")
            if not content and content is not None:
                counter_inc_fn("page-filtered-nocontent")
            if not content_type and content_type is not None:
                counter_inc_fn("page-nocontenttype")
            if not content_len and content_len is not None:
                counter_inc_fn("page-nocontentlen")
            if not timestamp and timestamp is not None:
                counter_inc_fn("page-notimestamp")
            if content and url:
                counter_inc_fn("page-emitted")
                return (
                    url,
                    {
                        "text": "\n".join(content),
                        "content-type": content_type,
                        "content-length": content_len,
                        "timestamp": timestamp,
                        "url": url,
                    },
                )
            return None

        for line in io.TextIOWrapper(g, encoding="utf-8"):
            line = line.strip()
            if not line:
                continue
            if line == _PAGE_DELIMITER:
                page = _maybe_get_page()
                if page:
                    yield page
                url = ""
                content = []
                content_len = ""
                content_type = ""
                timestamp = ""

            if line.startswith(_URL_KEY):
                url = line[len(_URL_KEY) :].strip()

            if line.startswith(_URL_DATE):
                timestamp = line[len(_URL_DATE) :].strip()

            if line.startswith(_CONTENT_TYPE):
                content_type = line[len(_CONTENT_TYPE) :].strip()

            if line.startswith(_CONTENT_LEN):
                content_len = line[len(_CONTENT_LEN) :].strip()

            if line.startswith(_METADATA_PREFIXES):
                continue

            content.append(line)

        page = _maybe_get_page()
        if page:
            yield page
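
Each yielded element is a `(url, features)` pair whose features carry `text`, `content-type`, `content-length`, `timestamp`, and `url`. A quick way to inspect that shape, assuming a locally downloaded WET file and this module on the Python path (the file name below is a placeholder, and the Beam counters referenced inside are assumed to be no-ops outside a pipeline):

# Hedged usage sketch; "some-dump.warc.wet.gz" is a hypothetical local path.
from c4_utils import split_wet_file

for url, features in split_wet_file("some-dump.warc.wet.gz"):
    print(url, features["timestamp"], len(features["text"]))
    break  # just peek at the first parsed page
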
            +
            def dedupe_urls(el):
         | 
| 422 | 
            +
                """Returns the first value for a given URL."""
         | 
| 423 | 
            +
                counter_inc_fn = get_counter_inc_fn("dedupe-urls")
         | 
| 424 | 
            +
                url, vals = el
         | 
| 425 | 
            +
                cnt = 0
         | 
| 426 | 
            +
                v = None
         | 
| 427 | 
            +
                for v in vals:
         | 
| 428 | 
            +
                    cnt += 1
         | 
| 429 | 
            +
                counter_inc_fn("filtered-url-duplicate", cnt - 1)
         | 
| 430 | 
            +
                counter_inc_fn("unique-url")
         | 
| 431 | 
            +
                return url, v
         | 
| 432 | 
            +
             | 
| 433 | 
            +
             | 
| 434 | 
            +
            def is_valid_length(el, max_length=1.9e5):
         | 
| 435 | 
            +
                """Returns False iff page's text is too long."""
         | 
| 436 | 
            +
                counter_inc_fn = get_counter_inc_fn("is-valid-length")
         | 
| 437 | 
            +
                _, page = el
         | 
| 438 | 
            +
                if len(page["text"]) > max_length:
         | 
| 439 | 
            +
                    counter_inc_fn("filtered-page-contenttoolong")
         | 
| 440 | 
            +
                    return False
         | 
| 441 | 
            +
                counter_inc_fn("valid-length")
         | 
| 442 | 
            +
                return True
         | 
| 443 | 
            +
             | 
| 444 | 
            +
             | 
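
In the pipeline these two helpers typically sit right after a `GroupByKey` on URL: `dedupe_urls` collapses duplicate fetches of the same URL and `is_valid_length` drops oversized pages. A small hedged sketch of that wiring with toy elements (the stage labels and data are invented, and `c4_utils` importability is assumed):

# Hedged sketch with the DirectRunner and invented input.
import apache_beam as beam
from c4_utils import dedupe_urls, is_valid_length  # assumes this file is on the Python path

toy = [
    ("example.com/p", {"text": "kept copy"}),
    ("example.com/p", {"text": "dropped duplicate copy"}),
    ("example.com/q", {"text": "x" * 200000}),  # longer than the 1.9e5-character default cap
]

with beam.Pipeline() as p:
    (
        p
        | beam.Create(toy)
        | "group_by_url" >> beam.GroupByKey()
        | "dedupe_urls" >> beam.Map(dedupe_urls)
        | "filter_too_long" >> beam.Filter(is_valid_length)
        | beam.Map(print)  # only example.com/p survives, with one of its two copies
    )
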
def is_realnews_domain(el, realnews_domains):
    """Returns False iff page's (sub)domain is not allowed."""
    import tldextract

    counter_inc_fn = get_counter_inc_fn("is-realnews-domain")
    url, _ = el
    ext = tldextract.extract(url)
    main_domain = ext.domain + "." + ext.suffix
    if main_domain not in realnews_domains:
        counter_inc_fn("filtered-url-invaliddomain")
        return False
    allowed_subdomains = realnews_domains[main_domain]
    if isinstance(allowed_subdomains, list) and ext.subdomain not in allowed_subdomains:
        counter_inc_fn("filtered-url-invalidsubdomain")
        return False
    counter_inc_fn("realnews-domain")
    return True
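
The `realnews_domains` argument is a mapping from registered domain to either a non-list value (any subdomain accepted) or an explicit list of allowed subdomains, which is what the `isinstance(allowed_subdomains, list)` check distinguishes. A toy example with invented domains, assuming the Beam counters inside are no-ops outside a pipeline and that this file is importable as `c4_utils`:

# Illustrative mapping only; real values come from the RealNews domain list.
from c4_utils import is_realnews_domain

toy_realnews_domains = {
    "example.com": True,      # any subdomain of example.com is accepted
    "example.org": ["www"],   # only www.example.org is accepted
}

print(is_realnews_domain(("https://blog.example.com/story", {}), toy_realnews_domains))  # True
print(is_realnews_domain(("https://blog.example.org/story", {}), toy_realnews_domains))  # False: subdomain not allowed
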
def filter_by_webtextlike(el):
    """Yields only pages with a matching WebText-like URL."""
    counter_inc_fn = get_counter_inc_fn("filter-by-webtextlike")
    url, join_values = el
    text = join_values["text"]
    webtextlike = join_values["webtextlike_urls"]
    if not webtextlike:
        counter_inc_fn("filtered-url-notwebtextlike")
        return
    if not text:
        counter_inc_fn("missing-webtextlike")
        return
    assert len(text) == 1
    counter_inc_fn("found-webtextlike")
    yield url, text[0]


def normalize_url(el):
    import tensorflow.compat.v2 as tf

    url, val = el
    url = tf.compat.as_text(url)
    url = re.sub(r"https?:\/\/(www\.)?", "", url)
    url = re.sub(r"\?(utm_|ref|feed).*", "", url)
    url = url.rstrip("/")
    return url, val
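
`normalize_url` strips the scheme and a leading `www.`, drops query strings beginning with `utm_`, `ref`, or `feed`, and removes a trailing slash, so URLs differing only in those details collapse to the same key. A toy check (the URL is invented, and `c4_utils` importability is assumed):

# Toy check of the normalization behavior.
from c4_utils import normalize_url

print(normalize_url(("https://www.example.com/article/?utm_source=feed", None)))
# -> ('example.com/article', None)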
