bawolf committed
Commit 160b74c
1 Parent(s): 0cb12a1

just upload through git

Files changed (5):
  1. .dockerignore.cog.bak +43 -0
  2. .envrc +1 -1
  3. .gitignore +1 -0
  4. .sample.envrc +1 -0
  5. scripts/upload_to_hub.py +0 -44
.dockerignore.cog.bak ADDED
@@ -0,0 +1,43 @@
+ # generated by replicate/cog
+ __pycache__
+ *.pyc
+ *.pyo
+ *.pyd
+ .Python
+ env
+ pip-log.txt
+ pip-delete-this-directory.txt
+ .tox
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.log
+ .git
+ .mypy_cache
+ .pytest_cache
+ .hypothesis
+
+ # generated by replicate/cog
+ __pycache__
+ *.pyc
+ *.pyo
+ *.pyd
+ .Python
+ env
+ pip-log.txt
+ pip-delete-this-directory.txt
+ .tox
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.log
+ .git
+ .mypy_cache
+ .pytest_cache
+ .hypothesis
.envrc CHANGED
@@ -1 +1 @@
- HF_USERNAME=bawolf
+ export HF_USERNAME=bawolf
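
The .envrc change matters for how the variable propagates: a plain assignment stays local to the shell that sources the file, so child processes (such as a Python script calling os.getenv("HF_USERNAME")) see nothing, and direnv, the usual consumer of .envrc files, only loads variables into the environment when they are exported. A quick plain-shell illustration of the difference (not from the commit):

    HF_USERNAME=bawolf                                       # shell-local only
    python3 -c 'import os; print(os.getenv("HF_USERNAME"))'  # prints: None
    export HF_USERNAME=bawolf                                # now in the child environment
    python3 -c 'import os; print(os.getenv("HF_USERNAME"))'  # prints: bawolf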
.gitignore CHANGED
@@ -25,6 +25,7 @@ wheels/
  venv/
  ENV/
  .env
+ .envrc

  # IDE
  .idea/
.sample.envrc ADDED
@@ -0,0 +1 @@
+ export HF_USERNAME=bawolf
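
With .envrc now ignored by git, the new .sample.envrc is the checked-in template. A typical local setup, assuming direnv is what reads .envrc here (its usual convention, not stated in the commit):

    cp .sample.envrc .envrc
    # replace bawolf with your own Hugging Face username, then
    direnv allow    # approve the file so direnv exports it into the shell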
scripts/upload_to_hub.py DELETED
@@ -1,44 +0,0 @@
- from transformers import CLIPProcessor
- from huggingface_hub import HfApi
- import os
- from dotenv import load_dotenv
- import torch
- from src.models.model import create_model
-
- def upload_model_to_hub(hf_username):
-     # Initialize huggingface api
-     api = HfApi()
-
-     # Load your custom model
-     num_classes = 3  # windmills, halos, and swipes
-     model = create_model(num_classes, "openai/clip-vit-large-patch14")
-
-     # Load your trained weights
-     state_dict = torch.load("./checkpoints/model.pth", map_location="cpu")
-     model.load_state_dict(state_dict, strict=False)
-
-     # Get the processor from the base CLIP model
-     processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
-
-     repo_id = f"{hf_username}/breaking-vision-clip-classifier"
-
-     # Save model configuration and architecture
-     config = {
-         "num_classes": num_classes,
-         "base_model": "openai/clip-vit-large-patch14",
-         "class_labels": ["windmill", "halo", "swipe"],
-         "model_type": "VariableLengthCLIP"
-     }
-
-     # Push to hub with config
-     model.push_to_hub(
-         repo_id,
-         config_dict=config,
-         commit_message="Upload custom CLIP-based dance classifier"
-     )
-     processor.push_to_hub(repo_id)
-
- if __name__ == "__main__":
-     load_dotenv()
-     hf_username = os.getenv("HF_USERNAME")
-     upload_model_to_hub(hf_username)
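
The deleted script uploaded the model and processor through huggingface_hub's push_to_hub; per the commit message, uploads now go straight through git instead. A minimal sketch of that flow, assuming a git-lfs-backed Hugging Face model repo (the repo name comes from the deleted script; local paths are illustrative):

    git lfs install    # one-time: enable git-lfs so large weight files are handled
    git clone https://huggingface.co/bawolf/breaking-vision-clip-classifier
    cd breaking-vision-clip-classifier
    git lfs track "*.pth"    # only needed if .gitattributes does not already cover it
    cp ../checkpoints/model.pth .    # checkpoint path taken from the deleted script
    git add .gitattributes model.pth
    git commit -m "upload model weights"
    git push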