jsulz (HF staff) committed
Commit f09a364
1 Parent(s): f20b5b5

merging ann-changes

Files changed (1)
  1. app.py +117 -42
app.py CHANGED
@@ -211,6 +211,53 @@ def cumulative_growth_plot_analysis(cumulative_df, cumulative_df_compressed):
    return fig


+ def cumulative_growth_single(_df):
+     """
+     Calculates the cumulative growth of models, spaces, and datasets over time and generates a plot and dataframe from the analysis.
+
+     Args:
+         df (DataFrame): The input dataframe containing the data.
+
+     Returns:
+         - fig (Figure): The Plotly figure showing the cumulative growth of models, spaces, and datasets over time.
+
+     Raises:
+         None
+     """
+
+     # Create a Plotly figure
+     fig = go.Figure()
+
+     # Define a color map for each type
+     color_map = {
+         "model": px.colors.qualitative.Alphabet[3],
+         "space": px.colors.qualitative.Alphabet[2],
+         "dataset": px.colors.qualitative.Alphabet[9],
+     }
+
+     # Add a scatter trace for each type
+     for column in _df.columns:
+         fig.add_trace(
+             go.Scatter(
+                 x=_df.index,
+                 y=_df[column] / 1e15,  # Convert to petabytes
+                 mode="lines",
+                 name=column.capitalize(),
+                 line=dict(color=color_map.get(column, "black")),  # Use color map
+             )
+         )
+
+     # Update layout
+     fig.update_layout(
+         title="Cumulative Growth of Models, Spaces, and Datasets",
+         xaxis_title="Date",
+         yaxis_title="Size (PBs)",
+         legend_title="Type",
+         yaxis=dict(tickformat=".2f"),  # Format y-axis labels to 2 decimal places
+     )
+     return fig
+
+
def plot_total_sum(by_type_arr):
    # Sort the array by size in decreasing order
    by_type_arr = sorted(by_type_arr, key=lambda x: x[1])
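As a quick illustration of the helper added above, here is a minimal sketch of calling cumulative_growth_single on its own. The real cumulative_df is built elsewhere in app.py and is not part of this diff; the sketch assumes a DataFrame with a date index, one column per repository type ("model", "space", "dataset"), and cumulative sizes in bytes, which is what the per-column traces and the division by 1e15 imply.

# Illustrative only -- not part of app.py. Assumes cumulative_growth_single (and the
# plotly imports it relies on) from the hunk above are in scope.
import pandas as pd

# Hypothetical stand-in for cumulative_df: monthly cumulative LFS bytes per repo type.
dates = pd.date_range("2022-03-01", "2024-09-01", freq="MS")
demo_df = pd.DataFrame(
    {
        "model": [1.0e15 * (i + 1) for i in range(len(dates))],
        "dataset": [6.0e14 * (i + 1) for i in range(len(dates))],
        "space": [2.0e13 * (i + 1) for i in range(len(dates))],
    },
    index=dates,
)

fig = cumulative_growth_single(demo_df)  # returns a Plotly figure with sizes scaled to PBs
fig.show()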
@@ -274,7 +321,7 @@ def filter_by_extension_month(_df, _extension):
        fig.add_trace(
            go.Scatter(
                x=pivot_df.index,
-                 y=pivot_df[column] / 1e12,  # Convert to TBs
+                 y=pivot_df[column] * 1e3,
                mode="lines",
                name=column,
                line=dict(color=px.colors.qualitative.Alphabet[i]),
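A hedged reading of the one-line change above: the removed line converted raw bytes to terabytes by dividing by 1e12, while the added line multiplies by 1e3, which produces the same terabyte values only if pivot_df is now aggregated in petabytes upstream (that aggregation is not visible in this hunk). A quick arithmetic check of that assumption:

# Illustrative arithmetic only -- the 42 TB figure is made up, and "pivot_df now holds
# PBs" is an assumption about code outside this hunk.
size_bytes = 4.2e13                   # ~42 TB added for one extension in one month
old_tb = size_bytes / 1e12            # removed line: bytes -> TBs
new_tb = (size_bytes / 1e15) * 1e3    # added line, if the column already holds PBs
assert abs(old_tb - new_tb) < 1e-6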
@@ -351,71 +398,56 @@ with gr.Blocks() as demo:

    last_10_months = compare_last_10_months(cumulative_df, cumulative_df_compressed)

-     by_repo_type = tabular_analysis(
+     by_repo_type_analysis = tabular_analysis(
        by_repo_type, cumulative_df, cumulative_df_compressed
    )

-     # get the figure for the cumulative growth plot and the last 10 months dataframe
-     fig = cumulative_growth_plot_analysis(cumulative_df, cumulative_df_compressed)
-
    # Add top level heading and introduction text
-     gr.Markdown("# Git LFS Usage Across the Hub")
+     gr.Markdown("# Git LFS Usage across the Hub")
    gr.Markdown(
-         "The Hugging Face Hub has just crossed 1,000,000 models - but where is all that data stored? Most of it is stored in Git LFS. This analysis dives into the LFS storage on the Hub, breaking down the data by repository type, file extension, and growth over time. The data is based on a snapshot of the Hub's LFS storage, starting in March 2022 and ending September 20th, 2024 (meaning the data is incomplete for September 2024). Right now, this is a one-time analysis, but as we do our work we hope to revisit and update the underlying data to provide more insights."
+         "Ever wonder what the Hugging Face Hub holds? This is the space for you!"
    )
-
    gr.Markdown(
-         "Now, you might ask yourself, 'Why are you doing this?' Well, the [Xet Team](https://huggingface.co/xet-team) is a [new addition to Hugging Face](https://huggingface.co/blog/xethub-joins-hf), bringing a new way to store massive datasets and models to enable ML teams to operate like software teams: Quickly and without friction. Because this story all starts with storage, that's where we've begun with our own deep dives into what the Hub holds. As part of this, we've included a look at what happens with just one simple deduplication strategy - deduplicating at the file level. Read on to see more!"
+         "The Hub stores all files using a combination of [Gitaly](https://gitlab.com/gitlab-org/gitaly) (small files) on EBS and [Git LFS](https://git-lfs.com/) (large files > 10MB) on S3. As part of the [Xet team](https://huggingface.co/xet-team), one of our goals is to improve Hub storage and transfer efficiency, and understanding how and what things are currently stored helps us establish a baseline. This analysis uses a snapshot of the Hub's Git LFS usage from March 2022 - September 2024, and we plan to update it regularly to track trends. We're starting with metrics around raw storage by repository type and size/count by file extension - if you're interested in other metrics, drop your suggestions in our [discussions](https://huggingface.co/spaces/xet-team/lfs-analysis/discussions)!"
    )
+
    gr.HTML(div_px(25))
    # Cumulative growth analysis
-     gr.Markdown("## Repository Growth")
+     gr.Markdown("## Storage by Repository Type")
    gr.Markdown(
-         "The plot below shows the growth of Git LFS storage on the Hub over the past two years. The solid lines represent the cumulative growth of models, spaces, and datasets, while the dashed lines represent the growth with file-level deduplication."
+         "The chart below shows the growth of Git LFS storage usage by repository type since March 2022."
    )
-     gr.Plot(fig)
+
+     # get the figure for the cumulative growth plot without dedupe analysis
+     cumulative_fig = cumulative_growth_single(cumulative_df)
+     gr.Plot(cumulative_fig)

    gr.HTML(div_px(5))
    # @TODO Talk to Allison about variant="panel"
    with gr.Row():
-         with gr.Column(scale=1):
+         with gr.Column(scale=2):
+             gr.Markdown("### Current Storage Usage")
            gr.Markdown(
-                 "In this table, we can see what the final picture looks like as of September 20th, 2024, along with the potential file-level deduplication savings."
-             )
-             gr.Markdown(
-                 "To put this in context, the last [Common Crawl](https://commoncrawl.org/) download was [451 TBs](https://github.com/commoncrawl/cc-crawl-statistics/blob/master/stats/crawler/CC-MAIN-2024-38.json#L31). The Spaces repositories alone outpaces that! Meanwhile, between Datasets and Model repos, the Hub stores **64 Common Crawls** 🤯. Current estimates put file deduplication savings at approximately 3.24 PBs (7.2 Common Crawls)!"
+                 "As of September 20, 2024, total files stored in Git LFS summed to almost 29 PB. To put this into perspective, the last [Common Crawl](https://commoncrawl.org/) download was [451 TBs](https://github.com/commoncrawl/cc-crawl-statistics/blob/master/stats/crawler/CC-MAIN-2024-38.json#L31) - the Hub stores the equivalent of more than **64 Common Crawls** 🤯."
            )
        with gr.Column(scale=3):
            # Convert the total size to petabytes and format to two decimal places
-             by_repo_type = format_dataframe_size_column(
-                 by_repo_type,
+             current_storage = format_dataframe_size_column(
+                 by_repo_type_analysis,
                ["Total Size (PBs)", "Compressed Size (PBs)", "Dedupe Savings (PBs)"],
            )
-             gr.Dataframe(by_repo_type)
-
-     gr.HTML(div_px(5))
-     with gr.Row():
-         with gr.Column(scale=1):
-             gr.Markdown(
-                 "The month-to-month growth of models, spaces, can be seen in the adjacent table. In 2024, the Hub has averaged nearly **2.3 PBs uploaded to LFS per month!** By the same token, the monthly file deduplication savings are nearly 225TBs. "
-             )
-
-             gr.Markdown(
-                 "Borrowing from the Common Crawl analogy, that's about *5 crawls* uploaded every month, with an _easy savings of half a crawl every month_ by deduplicating at the file-level!"
-             )
-         with gr.Column(scale=3):
-             gr.Dataframe(last_10_months)
+             gr.Dataframe(current_storage[["Repository Type", "Total Size (PBs)"]])

    gr.HTML(div_px(25))
    # File Extension analysis
-     gr.Markdown("## File Extensions on the Hub")
+     gr.Markdown("## Large Files Stored by File Extension")
    gr.Markdown(
-         "Breaking this down by file extension, some interesting trends emerge. The following sections filter the analysis to the top 20 file extensions stored (in bytes) using LFS (which accounts for 82% of storage consumption)."
+         "What types of files are stored on the Hub? The Xet team's backend architecture allows for storage optimizations by file type, so seeing the breakdown of the most popular stored file types helps to prioritize our roadmap. The following sections filter the analysis to the top 20 file extensions stored (by bytes) using Git LFS. Taken together, these 20 file extensions account for 82% of the total bytes stored in LFS."
    )
    gr.Markdown(
-         "As is evident in the chart below, [Safetensors](https://huggingface.co/docs/safetensors/en/index) is quickly becoming the defacto standard on the Hub for storing tensor files, accounting for over 7PBs (25%) of LFS storage. If you want to know why you'd want to check out YAF (yet another format), this explanation from the [Safetensors docs](https://github.com/huggingface/safetensors?tab=readme-ov-file#yet-another-format-) is a good place to start. Speaking of YAF, [GGUF (GPT-Generated Unified Format)](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md) is also on the rise, accounting for 3.2 PBs (11%) of LFS storage. GGUF, like Safetensors, is a format for storing tensor files, with a different set of optimizations. The Hub has a few [built-in tools](https://huggingface.co/docs/hub/en/gguf) for working with GGUF."
+         "[Safetensors](https://huggingface.co/docs/safetensors/en/index) is quickly becoming the defacto standard on the Hub for storing tensor files, accounting for over 7PBs (25%) of LFS storage. [GGUF (GPT-Generated Unified Format)](https://huggingface.co/docs/hub/gguf), a format for storing tensor files with a different set of optimizations, is also on the rise, accounting for 3.2 PBs (11%) of LFS storage."
    )
-     # Get the top 10 file extnesions by size
+     # Get the top 10 file extensions by size
    by_extension_size = by_extension.sort_values(by="size", ascending=False).head(22)

    # make a bar chart of the by_extension_size dataframe
@@ -445,20 +477,29 @@ with gr.Blocks() as demo:

    gr.HTML(div_px(5))
    gr.Markdown(
-         "Below, we have a more detailed tabular view of the same top 20 file extensions by total size, number of files, and average file size."
+         "This tabular view shows the same top 20 file extensions by total stored size, number of files, and average file size."
+     )
+     gr.Dataframe(
+         by_extension_size[
+             [
+                 "File Extension",
+                 "Total Size (PBs)",
+                 "Number of Files",
+                 "Average File Size (MBs)",
+             ]
+         ]
    )
-     gr.Dataframe(by_extension_size)

    gr.HTML(div_px(5))
-     gr.Markdown("### File Extension Monthly Additions (in PBs)")
+     gr.Markdown("### Storage Growth by File Extension (Monthly PBs Added)")
    gr.Markdown(
-         "What if we want to see trends over time? The following area chart shows the number of bytes added to LFS storage each month, faceted by the most popular file extensions."
+         "The following area chart shows the number of bytes added to LFS storage each month, faceted by file extension."
    )
    gr.Plot(area_plot_by_extension_month(by_extension_month))

    gr.HTML(div_px(5))
    gr.Markdown(
-         "To dig a little deeper, the following dropdown allows you to filter the area chart by file extension. Because we're dealing with individual file extensions, the data is presented in terabytes (TBs)."
+         "To dig deeper, use the dropdown to filter by file extension and see the bytes added (in TBs) each month for specific file types."
    )

    # build a dropdown using the unique values in the extension column
@@ -470,5 +511,39 @@ with gr.Blocks() as demo:
    _by_extension_month = gr.State(by_extension_month)
    gr.Plot(filter_by_extension_month, inputs=[_by_extension_month, extension])

+     gr.HTML(div_px(25))
+     # Optimizations
+
+     gr.Markdown("## Optimization 1: File-level Deduplication")
+     gr.Markdown(
+         "The first improvement we can make to Hub storage is to add file-level deduplication. Since forking any Hub repository makes copies of the files, a scan of existing files unsurprisingly shows that some files match exactly. The following chart shows the storage growth chart from above with additional dashed lines showing the potential savings from deduplicating at the file level."
+     )
+     dedupe_fig = cumulative_growth_plot_analysis(
+         cumulative_df, cumulative_df_compressed
+     )
+     gr.Plot(dedupe_fig)
+
+     gr.HTML(div_px(5))
+     # @TODO Talk to Allison about variant="panel"
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Markdown("### Current Storage Usage + File-level Deduplication")
+             gr.Markdown(
+                 "This simple change to the storage backend will save 3.24 PBs (the equivalent of 7.2 Common Crawls)."
+             )
+         with gr.Column(scale=3):
+             # Convert the total size to petabytes and format to two decimal places
+             gr.Dataframe(by_repo_type)
+
+     gr.HTML(div_px(5))
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Markdown("### Month-to-Month Growth + File-level Deduplication")
+             gr.Markdown(
+                 "This table shows month-to-month growth in model, dataset, and space storage. In 2024, the Hub has averaged nearly **2.3 PBs uploaded to Git LFS per month**. Deduplicating at the file level saves nearly 225 TB (half a Common Crawl) monthly."
+             )
+         with gr.Column(scale=3):
+             gr.Dataframe(last_10_months)
+
    # launch the dang thing
    demo.launch()
 
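The Common Crawl comparisons quoted in the new copy can be sanity-checked against the figures this diff itself cites (one crawl is roughly 451 TB, estimated total file-level dedupe savings are 3.24 PB, and monthly savings are about 225 TB). Illustrative arithmetic only, not part of app.py:

CRAWL_TB = 451                                # size of the CC-MAIN-2024-38 crawl linked above
total_savings_tb = 3.24 * 1000                # 3.24 PB of estimated dedupe savings, in TB
print(round(total_savings_tb / CRAWL_TB, 1))  # 7.2 -> "the equivalent of 7.2 Common Crawls"
print(round(225 / CRAWL_TB, 2))               # 0.5 -> "half a Common Crawl" saved per month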