autonomous019 committed
Commit 11cb1d3
Parent(s): 9163025

Update app.py

Files changed (1): app.py (+9 -10)
app.py CHANGED
@@ -9,10 +9,6 @@ from transformers import ImageClassificationPipeline, PerceiverForImageClassific
 from transformers import VisionEncoderDecoderModel
 from transformers import AutoTokenizer
 import torch
-
-# https://github.com/NielsRogge/Transformers-Tutorials/blob/master/HuggingFace_vision_ecosystem_overview_(June_2022).ipynb
-# option 1: load with randomly initialized weights (train from scratch)
-
 from transformers import (
     AutoModelForCausalLM,
     LogitsProcessorList,
@@ -21,11 +17,11 @@ from transformers import (
     MaxLengthCriteria,
 )
 
+# https://github.com/NielsRogge/Transformers-Tutorials/blob/master/HuggingFace_vision_ecosystem_overview_(June_2022).ipynb
+# option 1: load with randomly initialized weights (train from scratch)
 
-
-
-
-
+tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
+model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
 
 
 config = ViTConfig(num_hidden_layers=12, hidden_size=768)
@@ -44,8 +40,11 @@ model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/
 image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)
 
 def create_story(text_seed):
-    tokenizer = AutoTokenizer.from_pretrained("gpt2")
-    model = AutoModelForCausalLM.from_pretrained("gpt2")
+    #tokenizer = AutoTokenizer.from_pretrained("gpt2")
+    #model = AutoModelForCausalLM.from_pretrained("gpt2")
+
+    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
+    model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
 
     # set pad_token_id to eos_token_id because GPT2 does not have a EOS token
     model.config.pad_token_id = model.config.eos_token_id
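
The rest of create_story is not part of this diff, so the following is only a rough, hypothetical sketch of how the newly loaded GPT-J checkpoint could be driven with the generation utilities app.py imports (LogitsProcessorList, MaxLengthCriteria). MinLengthLogitsProcessor, StoppingCriteriaList, the prompt text, and the length bounds are assumptions borrowed from the standard transformers greedy-search example, not from this repository; model.greedy_search follows the older transformers API of that era (newer releases fold it into model.generate).

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    StoppingCriteriaList,
    MaxLengthCriteria,
)

# Illustrative only: "EleutherAI/gpt-j-6B" needs roughly 24 GB of RAM in fp32;
# swap in "gpt2" for a quick local smoke test.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")

# GPT-style checkpoints ship without a PAD token, so reuse EOS for padding
model.config.pad_token_id = model.config.eos_token_id

def create_story(text_seed):
    # encode the seed text into input token ids
    input_ids = tokenizer(text_seed, return_tensors="pt").input_ids

    # placeholder bounds: generate at least 10 and at most 100 tokens
    logits_processor = LogitsProcessorList(
        [MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id)]
    )
    stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=100)])

    with torch.no_grad():
        output_ids = model.greedy_search(
            input_ids,
            logits_processor=logits_processor,
            stopping_criteria=stopping_criteria,
        )
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]

print(create_story("The robot looked at the sunset and"))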