r3gm committed on
Commit
50194bf
1 Parent(s): ed6199e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -176,7 +176,7 @@ class GuiSD:
176
 
177
  if self.model.base_model_id != model_name:
178
  load_now_time = datetime.now()
179
- elapsed_time = (load_now_time - self.last_load).total_seconds()
180
 
181
  if elapsed_time <= 8:
182
  print("Waiting for the previous model's time ops...")
@@ -205,7 +205,7 @@ class GuiSD:
205
  yield f"Model loaded: {model_name}"
206
 
207
  # @spaces.GPU(duration=59)
208
- # @torch.inference_mode()
209
  def generate_pipeline(
210
  self,
211
  prompt,
@@ -519,7 +519,7 @@ class GuiSD:
519
 
520
  def dynamic_gpu_duration(func, duration, *args):
521
 
522
- @torch.inference_mode()
523
  @spaces.GPU(duration=duration)
524
  def wrapped_func():
525
  yield from func(*args)
@@ -1201,4 +1201,4 @@ app.launch(
1201
  show_error=True,
1202
  debug=True,
1203
  allowed_paths=["./images/"],
1204
- )
 
176
 
177
  if self.model.base_model_id != model_name:
178
  load_now_time = datetime.now()
179
+ elapsed_time = max((load_now_time - self.last_load).total_seconds(), 0)
180
 
181
  if elapsed_time <= 8:
182
  print("Waiting for the previous model's time ops...")
 
205
  yield f"Model loaded: {model_name}"
206
 
207
  # @spaces.GPU(duration=59)
208
+ @torch.inference_mode()
209
  def generate_pipeline(
210
  self,
211
  prompt,
 
519
 
520
  def dynamic_gpu_duration(func, duration, *args):
521
 
522
+ # @torch.inference_mode()
523
  @spaces.GPU(duration=duration)
524
  def wrapped_func():
525
  yield from func(*args)
 
1201
  show_error=True,
1202
  debug=True,
1203
  allowed_paths=["./images/"],
1204
+ )