Spaces: Running on Zero
disable tqdm
- app.py +1 -1
- app_onnx.py +1 -1
app.py CHANGED
@@ -51,7 +51,7 @@ def generate(model: MIDIModel, prompt=None, batch_size=1, max_len=512, temp=1.0,
                     mode="constant", constant_values=tokenizer.pad_id)
     input_tensor = torch.from_numpy(prompt).to(dtype=torch.long, device=model.device)
     cur_len = input_tensor.shape[1]
-    bar = tqdm.tqdm(desc="generating", total=max_len - cur_len)
+    bar = tqdm.tqdm(desc="generating", total=max_len - cur_len, disable=in_space)
     cache1 = DynamicCache()
     past_len = 0
     with bar:
app_onnx.py CHANGED
@@ -107,7 +107,7 @@ def generate(model, prompt=None, batch_size=1, max_len=512, temp=1.0, top_p=0.98
                     mode="constant", constant_values=tokenizer.pad_id)
     input_tensor = prompt
     cur_len = input_tensor.shape[1]
-    bar = tqdm.tqdm(desc="generating", total=max_len - cur_len)
+    bar = tqdm.tqdm(desc="generating", total=max_len - cur_len, disable=in_space)
     model0_inputs = {}
     model0_outputs = {}
     emb_size = 1024
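The diff only changes the two tqdm call sites; the `in_space` flag itself is defined elsewhere in the app and is not shown in this commit. A minimal sketch of the pattern, assuming `in_space` is derived from a Spaces-provided environment variable such as SPACE_ID (the actual detection used by this repo may differ):

    import os
    import tqdm

    # Assumption: detect whether the app is running inside a Hugging Face Space
    # by checking for an environment variable that Spaces set (e.g. SPACE_ID).
    # The real app.py may compute in_space differently.
    in_space = os.environ.get("SPACE_ID") is not None

    def demo_generate(max_len=512, cur_len=0):
        # tqdm's `disable` flag skips rendering the bar entirely when True,
        # which keeps the Space's container logs free of progress-bar spam
        # while leaving local runs unchanged.
        bar = tqdm.tqdm(desc="generating", total=max_len - cur_len, disable=in_space)
        with bar:
            for _ in range(max_len - cur_len):
                bar.update(1)

Passing `disable=` is preferable to removing the bar outright: the same code path serves both local development (where the progress bar is useful) and the hosted Space (where it only clutters the logs).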