[`refactor`]: Tab & URL syncing; parameter counts as model size; filtering; search (#89)
- Compute model size based on number of parameters (bd6a61b46998cdcc89eeb567850b734a48b87737)
- Refactor gradio Tabs initialization (56def8ad644fed5c5ef653500101899fe01c6876)
- Introduce tabs <-> URL relation for easier sharing (2d428eb2835e975816ff72bcda38cfe96323bcd3); see the sketch after this list
- Reintroduce missing markdown (e2b41c870d6e112347e1ba44c8620fcaf88bd719)
- Add search bar/filtering; always show Model Size (ab565badcfc1c2b02ddce0c2a0618875b5f8982f); also illustrated in the sketch below
- Ignore voyage-lite model size (82d58936b4312ab7fb0ef838b7bac1df70a46d45)
- Remove debugging statement (d81785e017b21579643aab840f584d59d0c95179)
- Introduce new intervals: 100M & 250M and 250M & 500M (2eb890dc40c9a4e71475f42b024d703a9f45db7b)
- API -> Proprietary (cfacdeee424f776fb60c10adbe3f4a9bb04d9f2a)
- Add Sentence Transformers model type option (6c6aac59f6d2f5f1744c9618e30e7be17a82d0b9)
- Fix embedding dimensions if Dense module exists (e82960d3779fe6bec1618330b1cbf63c4d383e9d)
- Use separate proprietary models list (561360760efd385ce82ea67ebca7674d6526a515)
- Fix proprietary models disappearing after model size toggling (485f27b4dcf9a19d4c963c858641da9da2da4a33)
- Increment gradio SDK version (fefaea64210d697cb4a0cf4a24a0c156784fdb1d)
- Base "Open" models on the proprietary model list (2db25dc3a419e68be78dca9ef0e997e8e38588ae)
- Update e5-mistral-7b model size from 7110 to 7111 (418d26a052f7f729cdabf09aeba288b02581ba4b)
- Merge branch 'main' into model_size_parameters (7d3a9f6218e33c393a5d886b415a64c740f593ff)
- Add Memory Usage column to all tables (970b6a5470a7aef9a30ed14a27ae957f82c2b131)
- List Cohere-embed-english-v3.0 as proprietary (5bd316f1be45e51845d64c6908cfa7c1b18ba88a)
- Merge branch 'main' into model_size_parameters (0ebd4b87bbe1abf36714d2d16bbb08a528912334)
- Move globals around slightly (a8ba8f1f0e653322bb5447a2abfa437e3e74baba)
- Add parameter count for Google Gecko (4de60b86d8f1da6f6c10a7b9bb631f4165e18724)
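Since the `app.py` diff below is too large to render, here is a minimal sketch of the two headline features, tab <-> URL syncing and search/filtering. This is not the Space's actual code: it assumes Gradio 4.x, the tab ids, query-parameter name, and toy dataframe are invented, and only the URL -> tab direction of the sync is shown.

```python
import gradio as gr
import pandas as pd

# Toy stand-in for the leaderboard table.
DF = pd.DataFrame({
    "Model": ["e5-mistral-7b-instruct", "Cohere-embed-english-v3.0"],
    "Model Size (Million Parameters)": [7111.0, float("nan")],
})

def pick_tab(request: gr.Request):
    # Read e.g. ?tab=retrieval from a shared URL and select that tab on load.
    return gr.Tabs(selected=dict(request.query_params).get("tab", "overall"))

def search(query: str):
    # Case-insensitive substring filter over the model names.
    return DF[DF["Model"].str.contains(query, case=False)]

with gr.Blocks() as demo:
    with gr.Tabs() as tabs:
        with gr.Tab("Overall", id="overall"):
            box = gr.Textbox(label="Search models")
            table = gr.Dataframe(DF)
            box.change(search, box, table)
        with gr.Tab("Retrieval", id="retrieval"):
            gr.Markdown("Retrieval results would go here.")
    demo.load(pick_tab, None, tabs)

demo.launch()
```

With something like this, appending `?tab=retrieval` to the Space URL would open the Retrieval tab directly, which is what makes shared links land on the right leaderboard view.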
Files changed:
- .gitignore +1 -0
- README.md +1 -1
- app.py +0 -0
- utils/__init__.py +0 -0
- utils/model_size.py +40 -0
.gitignore:
```diff
@@ -0,0 +1 @@
+*.pyc
```
README.md:
```diff
@@ -4,7 +4,7 @@ emoji: 🥇
 colorFrom: blue
 colorTo: indigo
 sdk: gradio
-sdk_version: 4.0
+sdk_version: 4.20.0
 app_file: app.py
 pinned: false
 tags:
```
app.py:
The diff for this file is too large to render; see the raw diff.
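Commit e82960d ("Fix embedding dimensions if Dense module exists") lands in this unrendered diff. Purely as a hedged illustration, not the Space's actual code (the helper name and the fallback argument are assumptions), such a fix could look like:

```python
import json
from huggingface_hub import hf_hub_download

def get_embedding_dim(model_id: str, hidden_size: int) -> int:
    # Sentence Transformers repos keep projection heads in numbered
    # folders such as 2_Dense/, whose config.json lists out_features;
    # that, not the transformer hidden size, is the true embedding dim.
    try:
        cfg_path = hf_hub_download(model_id, filename="2_Dense/config.json")
        with open(cfg_path) as f:
            return json.load(f)["out_features"]
    except Exception:
        # No Dense module (or download failed): fall back to hidden size.
        return hidden_size
```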
utils/__init__.py:
File without changes.
utils/model_size.py:
```diff
@@ -0,0 +1,40 @@
+import json
+import re
+from huggingface_hub.hf_api import ModelInfo, get_safetensors_metadata, model_info as get_model_info, get_hf_file_metadata, hf_hub_url
+from huggingface_hub import hf_hub_download
+
+# Map model IDs to the number of bytes used for one parameter. So, 4 bytes for fp32, 2 bytes for fp16, etc.
+# By default, we assume that the model is stored in fp32.
+KNOWN_BYTES_PER_PARAM = {}
+
+
+def get_model_parameters_memory(model_info: ModelInfo):
+    '''Get the model size in millions of parameters and its memory footprint in GB.'''
+    try:
+        safetensors = get_safetensors_metadata(model_info.id)
+        num_parameters = sum(safetensors.parameter_count.values())
+        return round(num_parameters / 1e6), round(num_parameters * 4 / 1024**3, 2)
+    except Exception as e:
+        pass
+
+    filenames = [sib.rfilename for sib in model_info.siblings]
+    if "pytorch_model.bin" in filenames:
+        url = hf_hub_url(model_info.id, filename="pytorch_model.bin")
+        meta = get_hf_file_metadata(url)
+        bytes_per_param = KNOWN_BYTES_PER_PARAM.get(model_info.id, 4)
+        return round(meta.size / bytes_per_param / 1e6), round(meta.size / 1024**3, 2)
+
+    if "pytorch_model.bin.index.json" in filenames:
+        index_path = hf_hub_download(model_info.id, filename="pytorch_model.bin.index.json")
+        """
+        {
+            "metadata": {
+                "total_size": 28272820224
+            },....
+        """
+        size = json.load(open(index_path))
+        bytes_per_param = KNOWN_BYTES_PER_PARAM.get(model_info.id, 4)
+        if ("metadata" in size) and ("total_size" in size["metadata"]):
+            return round(size["metadata"]["total_size"] / bytes_per_param / 1e6), round(size["metadata"]["total_size"] / 1024**3, 2)
+
+    return None, None
```
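For orientation, a minimal sketch of how the new helper would be called; the model ID is just an example, and `model_info` is huggingface_hub's standard metadata call:

```python
from huggingface_hub import model_info
from utils.model_size import get_model_parameters_memory

# Example only: any Hub model ID works here.
info = model_info("intfloat/e5-mistral-7b-instruct")
params_millions, memory_gb = get_model_parameters_memory(info)
print(params_millions, memory_gb)  # roughly (7111, 26.49) under the fp32 assumption
```

The helper tries safetensors metadata first (exact parameter counts), then falls back to file sizes divided by bytes-per-parameter, and returns `(None, None)` when neither source is available.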