NIRVANALAN committed
Commit 2ab2d9e (parent: ad1670c)

update dep

Files changed (2):
  1. Dockerfile +31 -0
  2. app.py +60 -58
Dockerfile ADDED
@@ -0,0 +1,31 @@
+ FROM docker.io/library/python:3.10.13@sha256:d5b1fbbc00fd3b55620a9314222498bebf09c4bf606425bf464709ed6a79f202
+
+ RUN apt-get update && apt-get install -y git git-lfs ffmpeg libsm6 libxext6 cmake rsync libgl1-mesa-glx && rm -rf /var/lib/apt/lists/* && git lfs install
+
+ RUN pip install --no-cache-dir pip==22.3.1 && pip install --no-cache-dir datasets "huggingface-hub>=0.19" "hf-transfer>=0.1.4" "protobuf<4" "click<8.1" "pydantic~=1.0" torch==2.3.1
+
+ RUN apt-get update && apt-get install -y fakeroot && mv /usr/bin/apt-get /usr/bin/.apt-get && echo '#!/usr/bin/env sh\nfakeroot /usr/bin/.apt-get $@' > /usr/bin/apt-get && chmod +x /usr/bin/apt-get && rm -rf /var/lib/apt/lists/* && useradd -m -u 1000 user
+
+ COPY --chown=1000:1000 --from=root / /
+
+ RUN --mount=target=/tmp/requirements.txt,source=requirements.txt pip install --no-cache-dir -r /tmp/requirements.txt
+
+ RUN pip freeze > /tmp/freeze.txt
+
+ WORKDIR /home/user/app
+
+ RUN pip install --no-cache-dir gradio[oauth]==4.25.0 "uvicorn>=0.14.0" spaces==0.29.3
+
+ COPY --link --chown=1000 ./ /home/user/app
+
+ COPY --from=pipfreeze --link --chown=1000 /tmp/freeze.txt /tmp/freeze.txt
+
+ # ENV PYTHONPATH=${HOME}/app \
+ #     PYTHONUNBUFFERED=1 \
+ #     GRADIO_ALLOW_FLAGGING=never \
+ #     GRADIO_NUM_PORTS=1 \
+ #     GRADIO_SERVER_NAME=0.0.0.0 \
+ #     GRADIO_THEME=huggingface \
+ #     SYSTEM=spaces
+
+ CMD ["python", "app.py"]
app.py CHANGED
@@ -32,6 +32,56 @@ import numpy as np
  import torch as th
  import torch.distributed as dist
 
+ def install_dependency():
+     # install full cuda first
+     # subprocess.run(
+     #     f'conda install -c nvidia cuda-nvcc',
+     #     shell=True
+     # )
+
+     # install apex
+     subprocess.run(
+         f'TORCH_CUDA_ARCH_LIST="compute capability" FORCE_CUDA=1 {sys.executable} -m pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git@master',
+         shell=True,
+     )
+
+ th.backends.cuda.matmul.allow_tf32 = True
+ th.backends.cudnn.allow_tf32 = True
+ th.backends.cudnn.enabled = True
+
+ install_dependency()
+
+ from guided_diffusion import dist_util, logger
+ from guided_diffusion.script_util import (
+     NUM_CLASSES,
+     model_and_diffusion_defaults,
+     create_model_and_diffusion,
+     add_dict_to_argparser,
+     args_to_dict,
+     continuous_diffusion_defaults,
+     control_net_defaults,
+ )
+
+ from pathlib import Path
+
+ from tqdm import tqdm, trange
+ import dnnlib
+ from nsr.train_util_diffusion import TrainLoop3DDiffusion as TrainLoop
+ from guided_diffusion.continuous_diffusion import make_diffusion as make_sde_diffusion
+ import nsr
+ import nsr.lsgm
+ from nsr.script_util import create_3DAE_model, encoder_and_nsr_defaults, loss_defaults, AE_with_Diffusion, rendering_options_defaults, eg3d_options_default, dataset_defaults
+
+ from datasets.shapenet import load_eval_data
+ from torch.utils.data import Subset
+ from datasets.eg3d_dataset import init_dataset_kwargs
+
+ from transport.train_utils import parse_transport_args
+
+ from utils.infer_utils import remove_background, resize_foreground
+
+ SEED = 0
+
 
  def resize_to_224(img):
      img = transforms.functional.resize(img, 224,
@@ -411,75 +461,27 @@ if __name__ == "__main__":
      os.environ[
          "TORCH_DISTRIBUTED_DEBUG"] = "DETAIL" # set to DETAIL for runtime logging.
 
-     zero = torch.Tensor([0]).cuda()
-     print(zero.device) # <-- 'cpu' 🤔
-
-     @spaces.GPU
-     def greet(n):
-
-         def install_dependency():
-             # install full cuda first
-             # subprocess.run(
-             #     f'conda install -c nvidia cuda-nvcc',
-             #     shell=True
-             # )
-
-             # install apex
-             subprocess.run(
-                 f'TORCH_CUDA_ARCH_LIST="compute capability" FORCE_CUDA=1 {sys.executable} -m pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git@master',
-                 shell=True,
-             )
-
-         th.backends.cuda.matmul.allow_tf32 = True
-         th.backends.cudnn.allow_tf32 = True
-         th.backends.cudnn.enabled = True
-
-         install_dependency()
-
-         from guided_diffusion import dist_util, logger
-         from guided_diffusion.script_util import (
-             NUM_CLASSES,
-             model_and_diffusion_defaults,
-             create_model_and_diffusion,
-             add_dict_to_argparser,
-             args_to_dict,
-             continuous_diffusion_defaults,
-             control_net_defaults,
-         )
-
-         from pathlib import Path
-
-         from tqdm import tqdm, trange
-         import dnnlib
-         from nsr.train_util_diffusion import TrainLoop3DDiffusion as TrainLoop
-         from guided_diffusion.continuous_diffusion import make_diffusion as make_sde_diffusion
-         import nsr
-         import nsr.lsgm
-         from nsr.script_util import create_3DAE_model, encoder_and_nsr_defaults, loss_defaults, AE_with_Diffusion, rendering_options_defaults, eg3d_options_default, dataset_defaults
-
-         from datasets.shapenet import load_eval_data
-         from torch.utils.data import Subset
-         from datasets.eg3d_dataset import init_dataset_kwargs
-
-         from transport.train_utils import parse_transport_args
-
-         from utils.infer_utils import remove_background, resize_foreground
-
+     args = create_argparser().parse_args()
+
+     # args.local_rank = int(os.environ["LOCAL_RANK"])
+     args.local_rank = 0
+     args.gpus = th.cuda.device_count()
+
+     args.rendering_kwargs = rendering_options_defaults(args)
+
+     # main(args)
+
+     zero = torch.Tensor([0]).cuda()
+     print(zero.device) # <-- 'cpu' 🤔
+
+     @spaces.GPU
+     def greet(n):
 
          print(zero.device) # <-- 'cuda:0' 🤗
          return f"Hello {zero + n} Tensor"
 
-     # args = create_argparser().parse_args()
-
-     # # args.local_rank = int(os.environ["LOCAL_RANK"])
-     # args.local_rank = 0
-     # args.gpus = th.cuda.device_count()
-
-     # args.rendering_kwargs = rendering_options_defaults(args)
-
-     # main(args)
-
      demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
      demo.launch()
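
The app.py change follows the ZeroGPU pattern: heavy setup (the apex install and the model-code imports) now runs once outside the GPU-decorated function, and @spaces.GPU only wraps the call that needs CUDA, which is why zero.device prints 'cpu' where the tensor is created but 'cuda:0' inside greet. A minimal self-contained sketch of that pattern, with illustrative names only (not code from this commit):

    import gradio as gr
    import spaces
    import torch as th

    weight = th.randn(4, 4)  # created at import time, so it lives on the CPU

    @spaces.GPU  # a GPU is attached only for the duration of this call
    def matmul_demo(n: float):
        w = weight.cuda()  # inside the decorated call, CUDA is available
        y = (w @ w).sum() + n
        return f"sum = {y.item():.3f} on {w.device}"

    demo = gr.Interface(fn=matmul_demo, inputs=gr.Number(), outputs=gr.Text())

    if __name__ == "__main__":
        demo.launch()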