yeq6x committed on
Commit
bd4674a
1 Parent(s): d9b35ec

Edit use_local

Files changed (4)
  1. app.py +4 -3
  2. scripts/anime.py +14 -7
  3. scripts/model.py +5 -2
  4. scripts/process_utils.py +8 -5
app.py CHANGED
@@ -79,7 +79,7 @@ def worker():
             if task.task_id in active_tasks:
                 future = executor.submit(process_task, task)
                 task_futures[task.task_id] = future
-                update_queue_status(f'Task started: {task.task_id}')
+                update_queue_status(f'Task processing: {task.task_id}')
         except Exception as e:
             print(f"Worker error: {str(e)}")
         finally:
@@ -187,7 +187,8 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Server options.')
     parser.add_argument('--use_local', action='store_true', help='Use local model')
     parser.add_argument('--use_gpu', action='store_true', help='Set to True to use GPU but if not available, it will use CPU')
+    parser.add_argument('--use_dotenv', action='store_true', help='Use .env file for environment variables')
     args = parser.parse_args()
 
-    initialize(args.use_local, args.use_gpu)
-    socketio.run(app, debug=True, host='0.0.0.0', port=80)
+    initialize(args.use_local, args.use_gpu, args.use_dotenv)
+    socketio.run(app, debug=True, host='0.0.0.0', port=5000)
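The initialize() call now threads all three flags through, and the server moves from port 80 to 5000. A typical local launch with the new option set (assuming the script is started directly) would be:

    python app.py --use_local --use_gpu --use_dotenv

Clients that previously targeted port 80 need to point at http://localhost:5000 instead.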
scripts/anime.py CHANGED
@@ -6,15 +6,22 @@ Example:
 
 import os
 import torch
-from scripts.data import get_image_list, get_transform
+from scripts.data import get_image_list, get_transform, tensor_to_img, save_image
 from scripts.model import create_model
-from scripts.data import tensor_to_img, save_image
 import argparse
 from tqdm.auto import tqdm
 from kornia.enhance import equalize_clahe
 from PIL import Image
 import numpy as np
 
+model = None
+
+def init_model(use_local=False):
+    global model
+    model_opt = "default"
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    model = create_model(model_opt, use_local).to(device)
+    model.eval()
 
 # Takes an image as a numpy array, generates a line drawing, and returns it as a numpy array
 def generate_sketch(image, clahe_clip=-1, load_size=512):
@@ -28,10 +35,10 @@ def generate_sketch(image, clahe_clip=-1, load_size=512):
         np.ndarray: output image
     """
     # create model
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    model_opt = "default"
-    model = create_model(model_opt).to(device)
-    model.eval()
+    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    # model_opt = "default"
+    # model = create_model(model_opt).to(device)
+    # model.eval()
 
     aus_resize = None
     if load_size > 0:
@@ -85,7 +92,7 @@ if __name__ == '__main__':
     # create model
     gpu_list = ','.join(str(x) for x in opt.gpu_ids)
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    model = create_model(opt.model).to(device) # create a model given opt.model and other options
+    model = create_model(opt.model, use_local=True).to(device) # create a model given opt.model and other options
     model.eval()
 
     for test_path in tqdm(get_image_list(opt.dataroot)):
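generate_sketch no longer constructs the model on every call; it relies on the module-level model populated by init_model, so init_model must run before the first sketch is generated. A minimal sketch of the new calling contract (the synthetic input image is only for illustration):

    import numpy as np
    from scripts.anime import init_model, generate_sketch

    init_model(use_local=False)  # builds the global model once; required before generate_sketch
    image = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder RGB image
    sketch = generate_sketch(image, clahe_clip=-1, load_size=512)

If init_model is never called, the global model stays None and generate_sketch will fail; in this commit, process_utils.initialize() performs that call (see the last file below).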
scripts/model.py CHANGED
@@ -144,7 +144,7 @@ class Upsample(nn.Module):
         return x
 
 
-def create_model(model):
+def create_model(model, use_local):
     """Create a model for anime2sketch
     hardcoding the options for simplicity
     """
@@ -156,7 +156,10 @@ def create_model(model):
     cwd = os.getcwd()  # save the current directory
     os.chdir(os.path.dirname(__file__))  # move to this file's directory
     if model == 'default':
-        model_path = download_file("netG.pth", subfolder="models/Anime2Sketch")
+        model_path = (lambda filename, subfolder: os.path.join(subfolder, filename) if use_local else download_file(filename, subfolder)) \
+            ("netG.pth", "models/Anime2Sketch")
+        # model_path = ((filename, subfolder) => if (use_local) os.path.join(subfolder, filename) else download_file(filename, subfolder))("netG.pth", "models/Anime2Sketch") // JavaScript
+
         ckpt = torch.load(model_path)
         for key in list(ckpt.keys()):
             if 'module.' in key:
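The immediately-invoked lambda (mirroring the JavaScript one-liner kept in the comment) works, but a plain conditional expression is the idiomatic Python equivalent. A behavior-preserving sketch:

    filename, subfolder = "netG.pth", "models/Anime2Sketch"
    # Resolve the checkpoint locally, or download it, depending on use_local.
    model_path = os.path.join(subfolder, filename) if use_local else download_file(filename, subfolder)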
scripts/process_utils.py CHANGED
@@ -6,6 +6,7 @@ import cv2
 import numpy as np
 from scripts.generate_prompt import load_wd14_tagger_model, generate_tags, preprocess_image as wd14_preprocess_image
 from scripts.lineart_util import scribble_xdog, get_sketch, canny
+from scripts.anime import init_model
 import torch
 from diffusers import StableDiffusionPipeline, StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler, AutoencoderKL
 import gc
@@ -32,8 +33,9 @@ def ensure_rgb(image):
         return image.convert('RGB')
     return image
 
-def initialize(_use_local, use_gpu):
-    # load_dotenv()
+def initialize(_use_local=False, use_gpu=False, use_dotenv=False):
+    if use_dotenv:
+        load_dotenv()
     global model, sotai_gen_pipe, refine_gen_pipe, use_local, device, torch_dtype
     device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
     torch_dtype = torch.float16 if device == "cuda" else torch.float32
@@ -41,6 +43,7 @@ def initialize(_use_local, use_gpu):
     print('')
     print(f"Device: {device}, Local model: {_use_local}")
     print('')
+    init_model(use_local)
     model = load_wd14_tagger_model()
     sotai_gen_pipe = initialize_sotai_model()
     refine_gen_pipe = initialize_refine_model()
@@ -53,8 +56,8 @@ def initialize_sotai_model():
     global device, torch_dtype
 
     sotai_sd_model_path = get_file_path(os.environ["sotai_sd_model_name"], subfolder=os.environ["sd_models_dir"])
-    # controlnet_path1 = get_file_path(os.environ["controlnet_name1"], subfolder=os.environ["controlnet_dir2"])
-    controlnet_path1 = get_file_path(os.environ["controlnet_name2"], subfolder=os.environ["controlnet_dir1"])
+    controlnet_path1 = get_file_path(os.environ["controlnet_name1"], subfolder=os.environ["controlnet_dir2"])
+    # controlnet_path1 = get_file_path(os.environ["controlnet_name2"], subfolder=os.environ["controlnet_dir1"])
     controlnet_path2 = get_file_path(os.environ["controlnet_name2"], subfolder=os.environ["controlnet_dir1"])
     print(use_local, controlnet_path1)
 
@@ -229,7 +232,7 @@ def generate_sotai_image(input_image: Image.Image, output_width: int, output_hei
         denoising_strength=0.13,
         num_images_per_prompt=1, # Equivalent to batch_size
         guess_mode=[True, True], # Equivalent to pixel_perfect
-        controlnet_conditioning_scale=[1.2, 1.3], # weight for each ControlNet
+        controlnet_conditioning_scale=[1.4, 1.3], # weight for each ControlNet
         guidance_start=[0.0, 0.0],
         guidance_end=[1.0, 1.0],
     )
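load_dotenv() now runs whenever --use_dotenv is passed, which assumes the python-dotenv package is installed and imported (from dotenv import load_dotenv, presumably elsewhere in this file). With controlnet_path1 switched to controlnet_name1/controlnet_dir2, the environment must define both ControlNet entries. An illustrative .env sketch; the keys match the os.environ lookups above, but every value is a placeholder:

    # Placeholder values; substitute real model file names and directories.
    sotai_sd_model_name=example_sd_model.safetensors
    sd_models_dir=models/StableDiffusion
    controlnet_name1=example_controlnet_1.safetensors
    controlnet_dir2=models/ControlNet2
    controlnet_name2=example_controlnet_2.safetensors
    controlnet_dir1=models/ControlNet1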