from fasthtml.common import *
from fasthtml.components import *
from fasthtml.components import D_title, D_article, D_front_matter, D_contents, D_byline
from plotly import graph_objects as go
from fh_plotly import plotly2fasthtml
import pandas as pd
import json
from rich import print
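# Page section modules; their handlers are registered as routes at the bottom
# of this file.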
import overview
import curated
import web
import common
import results


app, rt = fast_app(
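    # Distill-style article setup: Pico CSS is disabled in favor of a custom
    # stylesheet; the headers pull in the distill.pub v2 template, HTMX for
    # partial page swaps, Plotly, and FastHTML's Markdown/HighlightJS helpers.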
    debug=True,
    pico=False,
    hdrs=(
        Meta(charset="UTF-8"),
        Meta(name="viewport", content="width=device-width, initial-scale=1.0"),
        Script(src="https://distill.pub/template.v2.js"),
        Script(src="https://unpkg.com/htmx.org@next/dist/htmx.min.js"),
        Script(src="https://cdn.plot.ly/plotly-latest.min.js"),
        Link(rel="stylesheet", href="style.css"),
        MarkdownJS(),
        HighlightJS(langs=["python", "javascript", "html", "css"]),
    ),
)


@app.get("/")
def main():
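    # Landing page: Distill front matter and title block plus a table-of-contents
    # sidebar. Each nav entry uses hx_get/hx_target to swap its section into the
    # #inner-text container without a full reload; intro() provides the initial
    # content of that container.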
    return Div(
        D_front_matter(),
        D_title(
            H1(
                "TxT360: the most comprehensive, highest quality, and production ready pretraining dataset",
                cls="l-body",
                style="text-align: center;",
            ),
            Div(
                Img(src="images/llm360_logo.png"),
                id="title-plot",
                cls="main-plot-container l-page",
            ),
        ),
        D_article(
            D_contents(
                Nav(
                    H3("Table of Contents"),
                    Div(
                        A("TxT360", href="#_self"),
                        hx_get="/intro",
                        hx_target="#inner-text",
                    ),
                    Div(
                        Ul(
                            Li(
                                A(
                                    "About TxT360",
                                    href="/intro#section1",
                                    hx_get="/intro#section1",
                                    hx_target="#inner-text",
                                )
                            ),
                            Li(
                                A(
                                    "Global Deduplication",
                                    href="/intro#section2",
                                    hx_get="/intro#section2",
                                    hx_target="#inner-text",
                                )
                            ),
                            Li(
                                A(
                                    "Controllable Upweighting",
                                    href="/intro#section3",
                                    hx_get="/intro#section3",
                                    hx_target="#inner-text",
                                )
                            ),
                            Li(
                                A(
                                    "Full Documentation",
                                    href="/intro#section4",
                                    hx_get="/intro#section4",
                                    hx_target="#inner-text",
                                )
                            ),
                        ),
                    ),
                    Div(
                        A("Overview", href="#inner-text"),
                        hx_get="/overview",
                        hx_target="#inner-text",
                    ),
                    Div(
                        A("Global Processing Steps", href="#inner-text"),
                        hx_get="/common",
                        hx_target="#inner-text",
                    ),
                    Div(
                        A("Web Data", href="#inner-text"),
                        hx_get="/webdata",
                        hx_target="#inner-text",
                    ),
                    Div(
                        A("Curated Sources", href="#inner-text"),
                        hx_get="/curated",
                        hx_target="#inner-text",
                    ),
                    Div(
                        A("TxT360 Results", href="#inner-text"),
                        hx_get="/results",
                        hx_target="#inner-text",
                    ),
                    role="navigation",
                    cls="l-text figcaption",
                ),
            ),
            intro(),
        ),
    )

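# Prose blocks for the introduction. previous_background and previous_content
# are kept as reference drafts and are not rendered by any route in this file.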
intro_text = P(
"""Pretraining performant large language models (LLMs) requires trillions of tokens of high quality data. Many prior work, including our previous pretraining projects Amber-7B, Crystal-7B, and K2-65B have demonstrated how data curation is a ‘make-or-break’ decision for model quality and capability.""")

intro_list = P("""We present TxT360, the Trillion eXtracted Text corpus, a 5.7T-token dataset for LLM pretraining that:""")

intro_list1 = Ol(
                Li("Curates commonly used pretraining datasets, including all CommonCrawl"),
                Li("Employs carefully selected filters designed for each data source"),
                Li("Provides only unique data elements via globally deduplicated across all datasets"),
                Li("Retains all deduplication metadata for custom upweighting"),
                Li("Is Production ready! Download here [link to HF repo]")
)


previous_background =  P(
                """ The quality and size of a pre-training dataset
                    play a crucial role in the performance of large
                    language models (LLMs). The community has
                    introduced a variety of datasets for this purpose,
                    including purely web-based datasets like RefinedWeb
                    [1], RedPajama-Data-V2 [2], DCLM [3], and
                    FineWeb [4], as well as comprehensive datasets
                    derived from multiple highly-curated data sources
                    such as The Pile [5], RedPajama-Data-V1 [6], and
                    Dolma [7]. It is commonly known that web-based
                    datasets provide a vast quantity of data, while
                    highly-curated multi-source datasets consistently
                    deliver high quality and diversity, both critical
                    for effective LLM pre-training.  However, despite
                    the advancements in both types of data, each type
                    of dataset has its limitations. For instance, the
                    processing scripts for the web dataset, RefinedWeb,
                    known for its high quality, are not public, and
                    only about 10% of the entire dataset has been
                    disclosed. Conversely, the web component of
                    existing highly-curated multi-source datasets is
                    relatively small compared to purely web-based
                    datasets, limiting their coverage and diversity
                    compared to the scale of information from the
                    internet.  By integrating the extensive reach of
                    web data with the exceptional quality of curated
                    sources, TxT360 is crafted to meet and surpass the
                    rigorous standards required for state-of-the-art
                    LLM pre-training. """
            )
previous_content =  P("""The performance of a large language model (LLM)
                    depends heavily on the quality and size of its
                    pretraining dataset. However, the pretraining
                    datasets for state-of-the-art open LLMs like Llama
                    3 and Mixtral are not publicly available and very
                    little is known about how they were created.
                    Reading time: 45 min. For the best reading
                    experience, we recommend not using a mobile phone.
                    Recently, we released 🍷 FineWeb, a new,
                    large-scale (15-trillion tokens, 44TB disk space)
                    dataset for LLM pretraining. FineWeb is derived
                    from 96 CommonCrawl snapshots and produces
                    better-performing LLMs than other open pretraining
                    datasets. To bring more clarity in machine learning
                    and advance the open understanding of how to train
                    good quality large language models, we carefully
                    documented and ablated all of the design choices
                    used in FineWeb, including in-depth investigations
                    of deduplication and filtering strategies. The
                    present long form report is a deep dive in how to
                    create a large and high-quality web-scale dataset
                    for LLM pretraining. The dataset itself, 🍷
                    FineWeb, is available here.  We are extremely
                    thankful to the whole distill.pub team (Christopher
                    Olah, Shan Carter, Ludwig Schubert in particular)
                    for creating the template on which we based this
                    blog post. Thanks also for inspiring us with
                    exquisitely crafted articles and blog posts.  In
                    this report we also introduce 📚 FineWeb-Edu, a
                    subset of FineWeb constructed using scalable
                    automated high-quality annotations for educational
                    value, and which outperforms all openly accessible
                    web-datasets on a number of educational benchmarks
                    such as MMLU, ARC, and OpenBookQA. 📚 FineWeb-Edu
                    is available in two sizes/filtering-level: 1.3
                    trillion (very high educational content) and 5.4
                    trillion (high educational content) tokens (all
                    tokens are measured with GPT2 tokenizer). You can
                    download it here.  Both datasets are released under
                    the permissive ODC-By 1.0 license. TL;DR: This blog
                    covers a discussion on processing and evaluating
                    data quality at scale, the 🍷 FineWeb recipe
                    (listing and explaining all of our design choices),
                    and the process followed to create its 📚
                    FineWeb-Edu subset.""")







@app.get("/intro")
def intro():
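    # The four introduction sections; served both as the initial landing-page
    # content and as the HTMX response when "TxT360" is chosen in the TOC.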
    return Div(
        Section(
            H2("Introduction"),
            intro_text,
            intro_list,
            intro_list1,
            id="section1",
        ),
        Section(
            H3("Global Deduplication"),
            P("TxT360 curated a wide range of datasets, including a whopping 99 Common Crawl Dumps and  a list of high quality datasets: StackExchange, Wikipedia, Arxiv, USPTO, DM Math, HackerNews, Ubuntu IRC, Europarl, FreeLaw, PG19, S2ORC, PhilPapers, PubMed Abstracts, and PubMed Central. For the first time in a released dataset, we locally and globally deduplicated the data across each dataset creating the highest quality data available."),
            id="section2",
        ),
        Section(
            H3("Controllable Upweighting for Flexible Data Sample Weight Control"),
            P("In large-scale corpora like CommonCrawl, text duplication is a frequent occurrence. Duplication can be considered as a natural upsampling of some data points. Recent studies have highlighted the potential drawbacks of oversampling specific data points, which can negatively impact pretraining performance [2205.10487]. However, when samples are repeated appropriately, the performance can actually improve [2306.01116, 2305.16264, 2406.11794, FineWeb]. Despite this, there is currently no widely accepted best practice for data sampling, and it’s unlikely that a one-size-fits-all approach will emerge given the scale of these datasets. Previous work either leaves the deduplication process to the user (as seen in RedPajama V2 and DCLM-Pool) or provides a corpus that has been downsampled in a specific manner (such as in FineWeb and RefinedWeb)."),
            P("Given the high cost of deduplication, TxT360 offers a complete deduplication across all datasets (so you don’t have to). Additionally, TxT360 maintains detailed metadata for each sample, including the frequency and location of duplicates. This metadata gives pretrainers the flexibility to adjust the weight of samples as needed. In principle, one can recover the original dataset distribution (footnote: this approach also means a smaller size on disk). We will demonstrate a simple upsampling strategy that results in an effective pretraining dataset. "),
            id="section3",
        ),
        Section(
            H3("Full and Openly Documented Production Ready Pretraining Corpus"),
            P("We cover every aspect of the decisions made to produce the dataset, including document selection, filtering, quality assurance, deduplication, standardization and PII.  Our reasoning is thoroughly explained, ensuring transparency and replicability. "),
            P("Our code is open sourced here[link to github]."),
            P("The dataset is ready for immediate download directly from Hugging Face [link]."),
            P("In the remainder of this blog post, we will walk you through the entire process and the rationale behind each decision. Enjoy!"),
            id="section4",
        ),
        id="inner-text",
    )
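

# Illustrative sketch of the upweighting idea described in section3 above:
# because TxT360 keeps per-document duplicate metadata, a pretrainer can
# re-expand samples instead of accepting a fixed sampling policy. The
# "duplicate_count" field name and the repeat cap below are hypothetical,
# for illustration only; this helper is not used by the app.
def upsample_by_duplicates(documents, max_repeats=4):
    """Yield each document's text, repeated according to its duplicate count.

    `documents` is assumed to be an iterable of dicts carrying "text" and
    "duplicate_count" keys; capping the repeats guards against over-sampling
    heavily duplicated documents.
    """
    for doc in documents:
        repeats = min(doc.get("duplicate_count", 1), max_repeats)
        for _ in range(max(repeats, 1)):
            yield doc["text"]
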

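# Register the remaining sections as routes, delegating to handlers defined in
# the imported section modules.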
rt("/overview")(overview.overview)

rt("/curated")(curated.curated)

rt("/webdata")(web.web_data)
rt("/webdata/{target}")(web.update)

rt("/common")(common.common_steps)

rt("/results")(results.results)

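# Launch the app server.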
serve()