ysharma (HF staff) committed on
Commit 8a25635
1 Parent(s): 4ed6358

Update app.py

Files changed (1):
  app.py  +37 -37
app.py CHANGED
@@ -171,43 +171,43 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             value=44,
             info="Number of transformer layers used in the model"
         )
-        with gr.Column("User Defined"):
-            gr.Markdown("## User Defined")
-            num_gpus = gr.Number(
-                label="Number of GPUs",
-                value=1,
-                info="Number of GPUs used for training"
-            )
-            tensor_parallel_size = gr.Number(
-                label="Tensor Parallel Size",
-                value=1,
-                info="Tensor parallel degree (1 if not used)"
-            )
-            pipeline_parallel_size = gr.Number(
-                label="Pipeline Parallel Size",
-                value=1,
-                info="Pipeline parallel degree (1 if not used)"
-            )
-            batch_size_per_gpu = gr.Number(
-                label="Batch Size per GPU",
-                value=8,
-                info="Batch size per GPU"
-            )
-            ffn_expansion_factor = gr.Number(
-                label="FFN Expansion Factor",
-                value=4,
-                info="How much the MLP hidden size expands"
-            )
-            is_mixed_precision = gr.Checkbox(
-                label="Mixed Precision",
-                value=True,
-                info="Whether mixed precision is enabled"
-            )
-            misc_mem_gib = gr.Number(
-                label="Miscellaneous Memory Overhead (GiB)",
-                value=5,
-                info="Miscellaneous memory overhead per GPU by DL frameworks, communication libraries, etc."
-            )
+        with gr.Column("User Defined"):
+            gr.Markdown("## User Defined")
+            num_gpus = gr.Number(
+                label="Number of GPUs",
+                value=1,
+                info="Number of GPUs used for training"
+            )
+            tensor_parallel_size = gr.Number(
+                label="Tensor Parallel Size",
+                value=1,
+                info="Tensor parallel degree (1 if not used)"
+            )
+            pipeline_parallel_size = gr.Number(
+                label="Pipeline Parallel Size",
+                value=1,
+                info="Pipeline parallel degree (1 if not used)"
+            )
+            batch_size_per_gpu = gr.Number(
+                label="Batch Size per GPU",
+                value=8,
+                info="Batch size per GPU"
+            )
+            ffn_expansion_factor = gr.Number(
+                label="FFN Expansion Factor",
+                value=4,
+                info="How much the MLP hidden size expands"
+            )
+            is_mixed_precision = gr.Checkbox(
+                label="Mixed Precision",
+                value=True,
+                info="Whether mixed precision is enabled"
+            )
+            misc_mem_gib = gr.Number(
+                label="Miscellaneous Memory Overhead (GiB)",
+                value=5,
+                info="Miscellaneous memory overhead per GPU by DL frameworks, communication libraries, etc."
+            )
 
         calc_memory_button = gr.Button("Calculate Memory")
         memory_result = gr.Textbox(label="Memory Calculation Result", interactive=False)
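
For context, the sketch below is a minimal, hypothetical reconstruction (not the Space's actual app.py) of how the "User Defined" inputs in this hunk are typically wired to the "Calculate Memory" button in Gradio via Button.click. The calc_memory body is a placeholder that only echoes the chosen settings, since the real memory-estimation formula is not part of this diff; the compacted component definitions and the __main__ launch block are likewise assumptions for illustration only.

import gradio as gr

def calc_memory(num_gpus, tensor_parallel_size, pipeline_parallel_size,
                batch_size_per_gpu, ffn_expansion_factor,
                is_mixed_precision, misc_mem_gib):
    # Placeholder: the real Space derives per-GPU training memory from the
    # model shape and these parallelism settings; here we only echo them.
    precision = "mixed precision" if is_mixed_precision else "fp32"
    return (
        f"{int(num_gpus)} GPU(s), TP={int(tensor_parallel_size)}, "
        f"PP={int(pipeline_parallel_size)}, batch/GPU={int(batch_size_per_gpu)}, "
        f"FFN x{int(ffn_expansion_factor)}, {precision}, "
        f"misc overhead={misc_mem_gib} GiB/GPU"
    )

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Column():
        gr.Markdown("## User Defined")
        num_gpus = gr.Number(label="Number of GPUs", value=1)
        tensor_parallel_size = gr.Number(label="Tensor Parallel Size", value=1)
        pipeline_parallel_size = gr.Number(label="Pipeline Parallel Size", value=1)
        batch_size_per_gpu = gr.Number(label="Batch Size per GPU", value=8)
        ffn_expansion_factor = gr.Number(label="FFN Expansion Factor", value=4)
        is_mixed_precision = gr.Checkbox(label="Mixed Precision", value=True)
        misc_mem_gib = gr.Number(label="Miscellaneous Memory Overhead (GiB)", value=5)

    calc_memory_button = gr.Button("Calculate Memory")
    memory_result = gr.Textbox(label="Memory Calculation Result", interactive=False)

    # Wire the button to the callback: all user-defined inputs go in, the
    # formatted result comes back out into the read-only textbox.
    calc_memory_button.click(
        fn=calc_memory,
        inputs=[num_gpus, tensor_parallel_size, pipeline_parallel_size,
                batch_size_per_gpu, ffn_expansion_factor,
                is_mixed_precision, misc_mem_gib],
        outputs=memory_result,
    )

if __name__ == "__main__":
    demo.launch()

In the actual Space, the click handler would also receive the model-definition inputs from earlier in the file (for example, the number-of-layers field whose value=44 appears at the top of this hunk) so the memory estimate reflects both the model shape and the user-defined training setup.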