Deadmon committed
Commit 75a7f7d
1 Parent(s): c05f545

Update app.py

Files changed (1)
  app.py +2 -6
app.py CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+import spaces # Import spaces for ZeroGPU compatibility
 from janus.models import MultiModalityCausalLM, VLChatProcessor
 from janus.utils.io import load_pil_images
 import numpy as np
@@ -6,10 +7,6 @@ from PIL import Image
 from transformers import AutoConfig, AutoModelForCausalLM
 import torch
 
-##
-# Code from deepseek-ai/Janus
-# Space from huggingface/twodgirl.
-
 def generate(input_ids,
              width,
              height,
@@ -58,10 +55,10 @@ def unpack(dec, width, height, parallel_size=1):
     return visual_img
 
 @torch.inference_mode()
+@spaces.GPU # Decorate the function for ZeroGPU compatibility
 def generate_image(prompt,
                    width,
                    height,
-                   # num_steps,
                    guidance,
                    seed):
     if seed > -1:
@@ -117,7 +114,6 @@ if __name__ == '__main__':
     model_path = 'deepseek-ai/Janus-1.3B'
     processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
     tokenizer = processor.tokenizer
-    # model: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
     config = AutoConfig.from_pretrained(model_path)
     language_config = config.language_config
     language_config._attn_implementation = 'eager'
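
For reference, below is a minimal sketch (not part of this commit) of the ZeroGPU pattern the two added lines apply: import the spaces package and decorate the GPU-bound entry point with @spaces.GPU so the Space only requests a GPU while that function runs. The slow_gpu_fn demo and its toy model are hypothetical placeholders; only the import and the decorator come from the diff above.

import gradio as gr
import spaces  # hypothetical standalone example; only this import and @spaces.GPU mirror the commit
import torch

model = torch.nn.Linear(4, 4)  # placeholder model, created on CPU at startup

@spaces.GPU  # ZeroGPU attaches a GPU only for the duration of this call
def slow_gpu_fn(x: float) -> float:
    # Move the placeholder model to the GPU if one was attached, then run it.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    m = model.to(device)
    with torch.inference_mode():
        out = m(torch.full((4,), float(x), device=device))
    return out.sum().item()

demo = gr.Interface(fn=slow_gpu_fn, inputs='number', outputs='number')

if __name__ == '__main__':
    demo.launch()

Outside a ZeroGPU Space the decorator is documented to have no effect, so the same file should also run locally on plain CPU or CUDA hardware.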