Apply pre-commit
Files changed:

- app.py (+4 -5)
- app_training.py (+42 -47)
- app_upload.py (+3 -1)
- trainer.py (+8 -3)
- uploader.py (+5 -5)
app.py
CHANGED
@@ -38,7 +38,7 @@ You can use "T4 small/medium" to run this demo.
 </center>
 '''

-HF_TOKEN_NOT_SPECIFIED_WARNING = f'''The environment variable `HF_TOKEN` is not specified. Feel free to specify your Hugging Face token with write permission if you don't want to manually provide it for every run.
+HF_TOKEN_NOT_SPECIFIED_WARNING = f'''The environment variable `HF_TOKEN` is not specified. Feel free to specify your Hugging Face token with write permission if you don't want to manually provide it for every run.
 <center>
 You can check and create your Hugging Face tokens <a href="https://huggingface.co/settings/tokens" target="_blank">here</a>.
 You can specify environment variables in the "Repository secrets" section of the {SETTINGS} tab.
@@ -63,9 +63,8 @@ with gr.Blocks(css='style.css') as demo:
         show_warning(SHARED_UI_WARNING)
     elif not torch.cuda.is_available():
         show_warning(CUDA_NOT_AVAILABLE_WARNING)
-    elif(not
+    elif (not 'T4' in GPU_DATA):
         show_warning(INVALID_GPU_WARNING)
-

     gr.Markdown(TITLE)
     with gr.Tabs():
@@ -78,8 +77,8 @@ with gr.Blocks(css='style.css') as demo:
             - You can use this tab to upload models later if you choose not to upload models in training time or if upload in training time failed.
             ''')
             create_upload_demo(HF_TOKEN)
-
+
     if not HF_TOKEN:
         show_warning(HF_TOKEN_NOT_SPECIFIED_WARNING)

-demo.queue(max_size=1).launch(share=False)
+demo.queue(max_size=1).launch(share=False)
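The `elif` change above is part of the demo's hardware gate: the UI shows a warning banner unless CUDA is available and the attached GPU is a T4. A minimal sketch of that pattern, assuming `GPU_DATA` is the text output of `nvidia-smi` and using placeholder warning strings (the real constants and `show_warning` are defined earlier in app.py and are not part of this diff):

import subprocess

import gradio as gr
import torch

# Assumption: app.py derives GPU_DATA from nvidia-smi; the exact query is not shown in this diff.
try:
    GPU_DATA = subprocess.run(
        ['nvidia-smi', '--query-gpu=name', '--format=csv,noheader'],
        capture_output=True,
        text=True).stdout
except FileNotFoundError:
    GPU_DATA = ''

# Placeholder texts standing in for the warning constants defined in app.py.
CUDA_NOT_AVAILABLE_WARNING = 'CUDA is not available. Training will not work.'
INVALID_GPU_WARNING = 'This Space expects a T4 GPU.'


def show_warning(message: str) -> None:
    # Render the warning as a Markdown box inside the current Blocks context.
    with gr.Box():
        gr.Markdown(message)


with gr.Blocks() as demo:
    if not torch.cuda.is_available():
        show_warning(CUDA_NOT_AVAILABLE_WARNING)
    elif 'T4' not in GPU_DATA:
        show_warning(INVALID_GPU_WARNING)
    else:
        gr.Markdown('GPU check passed.')

The sketch uses the idiomatic `'T4' not in GPU_DATA`; the commit itself appears to only adjust spacing around `elif` and keeps the original `not ... in` expression.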
app_training.py
CHANGED
@@ -32,18 +32,23 @@ def create_training_demo(trainer: Trainer,
         with gr.Box():
             gr.Markdown('Training Parameters')
             with gr.Row():
-                base_model = gr.Text(
-
-
+                base_model = gr.Text(
+                    label='Base Model',
+                    value='CompVis/stable-diffusion-v1-4',
+                    max_lines=1)
                 resolution = gr.Dropdown(choices=['512', '768'],
                                          value='512',
                                          label='Resolution',
                                          visible=False)
-
-            input_token = gr.Text(label=
-
+
+            input_token = gr.Text(label='Hugging Face Write Token',
+                                  placeholder='',
+                                  visible=False if hf_token else True)
+            with gr.Accordion('Advanced settings', open=False):
                 num_training_steps = gr.Number(
-                    label='Number of Training Steps',
+                    label='Number of Training Steps',
+                    value=300,
+                    precision=0)
                 learning_rate = gr.Number(label='Learning Rate',
                                           value=0.000035)
                 gradient_accumulation = gr.Number(
@@ -57,33 +62,36 @@
                                  randomize=True,
                                  value=0)
                 fp16 = gr.Checkbox(label='FP16', value=True)
-                use_8bit_adam = gr.Checkbox(label='Use 8bit Adam',
-
-
-
-
-
-
+                use_8bit_adam = gr.Checkbox(label='Use 8bit Adam',
+                                            value=False)
+                checkpointing_steps = gr.Number(
+                    label='Checkpointing Steps',
+                    value=1000,
+                    precision=0)
+                validation_epochs = gr.Number(
+                    label='Validation Epochs', value=100, precision=0)
             gr.Markdown('''
             - The base model must be a Stable Diffusion model compatible with [diffusers](https://github.com/huggingface/diffusers) library.
             - Expected time to train a model for 300 steps: ~20 minutes with T4
            - You can check the training status by pressing the "Open logs" button if you are running this on your Space.
             ''')
-
+
         with gr.Row():
             with gr.Column():
                 gr.Markdown('Output Model')
                 output_model_name = gr.Text(label='Name of your model',
                                             placeholder='The surfer man',
                                             max_lines=1)
-                validation_prompt = gr.Text(
+                validation_prompt = gr.Text(
+                    label='Validation Prompt',
+                    placeholder=
+                    'prompt to test the model, e.g: a dog is surfing')
             with gr.Column():
                 gr.Markdown('Upload Settings')
                 with gr.Row():
-                    upload_to_hub = gr.Checkbox(
-
-                    use_private_repo = gr.Checkbox(label='Private',
-                        value=True)
+                    upload_to_hub = gr.Checkbox(label='Upload model to Hub',
+                                                value=True)
+                    use_private_repo = gr.Checkbox(label='Private', value=True)
                     delete_existing_repo = gr.Checkbox(
                         label='Delete existing repo of the same name',
                         value=False)
@@ -91,45 +99,32 @@ def create_training_demo(trainer: Trainer,
                         label='Upload to',
                         choices=[_.value for _ in UploadTarget],
                         value=UploadTarget.MODEL_LIBRARY.value)
-
+
         remove_gpu_after_training = gr.Checkbox(
             label='Remove GPU after training',
             value=False,
             interactive=bool(os.getenv('SPACE_ID')),
             visible=False)
         run_button = gr.Button('Start Training')
-
+
         with gr.Box():
             gr.Markdown('Output message')
             output_message = gr.Markdown()

         if pipe is not None:
             run_button.click(fn=pipe.clear)
-        run_button.click(
-
-
-
-
-
-
-
-
-
-
-
-                seed,
-                fp16,
-                use_8bit_adam,
-                checkpointing_steps,
-                validation_epochs,
-                upload_to_hub,
-                use_private_repo,
-                delete_existing_repo,
-                upload_to,
-                remove_gpu_after_training,
-                input_token
-            ],
-            outputs=output_message)
+        run_button.click(
+            fn=trainer.run,
+            inputs=[
+                training_video, training_prompt, output_model_name,
+                delete_existing_repo, validation_prompt, base_model,
+                resolution, num_training_steps, learning_rate,
+                gradient_accumulation, seed, fp16, use_8bit_adam,
+                checkpointing_steps, validation_epochs, upload_to_hub,
+                use_private_repo, delete_existing_repo, upload_to,
+                remove_gpu_after_training, input_token
+            ],
+            outputs=output_message)
     return demo
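The final hunk wires the Start Training button to `Trainer.run`: Gradio reads the current value of every component listed in `inputs`, passes them to `fn` as positional arguments in that order, and renders the returned string in `output_message`. A stripped-down sketch of that wiring, with an illustrative callback standing in for the real `Trainer.run`:

import gradio as gr


def run(num_training_steps: int, learning_rate: float, fp16: bool) -> str:
    # Illustrative stand-in for Trainer.run; it only echoes the values it received.
    return (f'Would train for {num_training_steps} steps '
            f'(lr={learning_rate}, fp16={fp16}).')


with gr.Blocks() as demo:
    num_training_steps = gr.Number(label='Number of Training Steps',
                                   value=300,
                                   precision=0)
    learning_rate = gr.Number(label='Learning Rate', value=0.000035)
    fp16 = gr.Checkbox(label='FP16', value=True)
    run_button = gr.Button('Start Training')
    output_message = gr.Markdown()

    # The order of `inputs` must match the parameter order of `fn`.
    run_button.click(fn=run,
                     inputs=[num_training_steps, learning_rate, fp16],
                     outputs=output_message)

if __name__ == '__main__':
    demo.queue(max_size=1).launch()

Because the component values are passed positionally, the long `inputs` list in the diff has to stay in the exact order in which `Trainer.run` declares its parameters.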
app_upload.py
CHANGED
@@ -70,7 +70,9 @@ def create_upload_demo(hf_token: str | None) -> gr.Blocks:
                     choices=[_.value for _ in UploadTarget],
                     value=UploadTarget.MODEL_LIBRARY.value)
             model_name = gr.Textbox(label='Model Name')
-            input_token = gr.Text(label=
+            input_token = gr.Text(label='Hugging Face Write Token',
+                                  placeholder='',
+                                  visible=False if hf_token else True)
             upload_button = gr.Button('Upload')
             gr.Markdown(f'''
             - You can upload your trained model to your personal profile (i.e. https://huggingface.co/{{your_username}}/{{model_name}}) or to the public [Tune-A-Video Library](https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}) (i.e. https://huggingface.co/{MODEL_LIBRARY_ORG_NAME}/{{model_name}}).
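The added lines reuse the token-field pattern from app_training.py: the write-token textbox is rendered only when no `HF_TOKEN` secret is configured, so users of a duplicated Space can still paste a token manually. A minimal sketch of that pattern, assuming the token comes from the environment:

import os

import gradio as gr

hf_token = os.getenv('HF_TOKEN')

with gr.Blocks() as demo:
    model_name = gr.Textbox(label='Model Name')
    # Hidden when an HF_TOKEN secret already provides write access.
    input_token = gr.Text(label='Hugging Face Write Token',
                          placeholder='',
                          visible=False if hf_token else True)
    upload_button = gr.Button('Upload')

`visible=False if hf_token else True` is equivalent to the shorter `visible=not hf_token`; the diff keeps the explicit conditional form.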
trainer.py
CHANGED
@@ -23,6 +23,7 @@ URL_TO_JOIN_MODEL_LIBRARY_ORG = 'https://huggingface.co/organizations/Tune-A-Vid
 ORIGINAL_SPACE_ID = 'Tune-A-Video-library/Tune-A-Video-Training-UI'
 SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)

+
 class Trainer:
     def __init__(self, hf_token: str | None = None):
         self.hf_token = hf_token
@@ -73,7 +74,9 @@ class Trainer:
         input_token: str,
     ) -> str:
         if SPACE_ID == ORIGINAL_SPACE_ID:
-            raise gr.Error(
+            raise gr.Error(
+                'This Space does not work on this Shared UI. Duplicate the Space and attribute a GPU'
+            )
         if not torch.cuda.is_available():
             raise gr.Error('CUDA is not available.')
         if training_video is None:
@@ -97,7 +100,8 @@ class Trainer:
         output_dir.mkdir(parents=True)

         if upload_to_hub:
-            self.join_model_library_org(
+            self.join_model_library_org(
+                self.hf_token if self.hf_token else input_token)

         config = OmegaConf.load('Tune-A-Video/configs/man-surfing.yaml')
         config.pretrained_model_path = self.download_base_model(base_model)
@@ -154,7 +158,8 @@ class Trainer:
         if remove_gpu_after_training:
             space_id = os.getenv('SPACE_ID')
             if space_id:
-                api = HfApi(
+                api = HfApi(
+                    token=self.hf_token if self.hf_token else input_token)
                 api.request_space_hardware(repo_id=space_id,
                                            hardware='cpu-basic')
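The last hunk is the "remove GPU after training" step: once training finishes, the Space downgrades its own hardware by calling `HfApi.request_space_hardware` with the `cpu-basic` tier, using the configured `HF_TOKEN` when present and the user-supplied token otherwise. A self-contained sketch of that step (the function name is illustrative; in the diff this logic lives inside `Trainer.run`):

import os

from huggingface_hub import HfApi


def release_gpu(hf_token: str | None, input_token: str | None) -> None:
    # SPACE_ID identifies the Space this code is running in; it is only set on Spaces.
    space_id = os.getenv('SPACE_ID')
    if space_id:
        # Fall back to the token typed into the UI when no HF_TOKEN secret exists.
        api = HfApi(token=hf_token if hf_token else input_token)
        api.request_space_hardware(repo_id=space_id, hardware='cpu-basic')

Downgrading to `cpu-basic` stops GPU billing as soon as the run is done; the matching checkbox in app_training.py is only interactive when `SPACE_ID` is set.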
uploader.py
CHANGED
@@ -15,23 +15,23 @@ class Uploader:
                private: bool = True,
                delete_existing_repo: bool = False,
                input_token: str | None = None) -> str:
-
+
        api = HfApi(token=self.hf_token if self.hf_token else input_token)
-
+
        if not folder_path:
            raise ValueError
        if not repo_name:
            raise ValueError
        if not organization:
            organization = api.whoami()['name']
-
+
        repo_id = f'{organization}/{repo_name}'
        if delete_existing_repo:
            try:
-
+                api.delete_repo(repo_id, repo_type=repo_type)
            except Exception:
                pass
-        try:
+        try:
            api.create_repo(repo_id, repo_type=repo_type, private=private)
            api.upload_folder(repo_id=repo_id,
                              folder_path=folder_path,
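Taken together, the hunk above covers the whole upload path: resolve the owner via `whoami`, optionally delete an existing repo of the same name, recreate it, and push the output folder. A condensed sketch of that flow with `huggingface_hub`, keeping the argument names from the diff but collapsing the `Uploader` class into a single function:

from huggingface_hub import HfApi


def upload(folder_path: str,
           repo_name: str,
           organization: str = '',
           repo_type: str = 'model',
           private: bool = True,
           delete_existing_repo: bool = False,
           token: str | None = None) -> str:
    api = HfApi(token=token)
    if not folder_path:
        raise ValueError
    if not repo_name:
        raise ValueError
    if not organization:
        # Default to the personal namespace of the token owner.
        organization = api.whoami()['name']
    repo_id = f'{organization}/{repo_name}'
    if delete_existing_repo:
        try:
            api.delete_repo(repo_id, repo_type=repo_type)
        except Exception:
            # The repo may simply not exist yet; ignore and continue.
            pass
    api.create_repo(repo_id, repo_type=repo_type, private=private)
    api.upload_folder(repo_id=repo_id,
                      folder_path=folder_path,
                      repo_type=repo_type)
    return f'https://huggingface.co/{repo_id}'

The bare `raise ValueError` statements mirror the diff; adding messages such as `raise ValueError('folder_path is required')` would make failures easier to diagnose.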