danavirtual committed on
Commit
de48798
•
1 Parent(s): 8b26038

Initial Commit

Files changed (2)
  1. app.py +215 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,215 @@
+ import gradio as gr
+ import requests
+ import torch
+ import transformers
+ import einops  # required by the MPT remote-code modules
+ ###
+ from typing import Any, Dict, Tuple
+ import warnings
+ import datetime
+ import os
+ from threading import Event, Thread
+ from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
+
+
+ import textwrap
+
+ INSTRUCTION_KEY = "### Instruction:"
+ RESPONSE_KEY = "### Response:"
+ END_KEY = "### End"
+ INTRO_BLURB = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
+ PROMPT_FOR_GENERATION_FORMAT = """{intro}
+ {instruction_key}
+ {instruction}
+ {response_key}
+ """.format(
+     intro=INTRO_BLURB,
+     instruction_key=INSTRUCTION_KEY,
+     instruction="{instruction}",
+     response_key=RESPONSE_KEY,
+ )
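+ # For reference, the template above renders a request as:
+ #
+ #   Below is an instruction that describes a task. Write a response that appropriately completes the request.
+ #   ### Instruction:
+ #   <instruction text>
+ #   ### Response: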
+
+
+ class InstructionTextGenerationPipeline:
+     def __init__(
+         self,
+         model_name,
+         torch_dtype=torch.bfloat16,
+         trust_remote_code=True,
+         use_auth_token=None,
+     ) -> None:
+         self.model = AutoModelForCausalLM.from_pretrained(
+             model_name,
+             torch_dtype=torch_dtype,
+             trust_remote_code=trust_remote_code,
+             use_auth_token=use_auth_token,
+         )
+
+         tokenizer = AutoTokenizer.from_pretrained(
+             model_name,
+             trust_remote_code=trust_remote_code,
+             use_auth_token=use_auth_token,
+         )
+         if tokenizer.pad_token_id is None:
+             warnings.warn(
+                 "pad_token_id is not set for the tokenizer. Using eos_token_id as pad_token_id."
+             )
+             tokenizer.pad_token = tokenizer.eos_token
+         tokenizer.padding_side = "left"
+         self.tokenizer = tokenizer
+
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model.eval()
+         self.model.to(device=device, dtype=torch_dtype)
+
+         self.generate_kwargs = {
+             "temperature": 0.5,
+             "top_p": 0.92,
+             "top_k": 0,
+             "max_new_tokens": 512,
+             "use_cache": True,
+             "do_sample": True,
+             "eos_token_id": self.tokenizer.eos_token_id,
+             "pad_token_id": self.tokenizer.pad_token_id,
+             "repetition_penalty": 1.1,  # 1.0 means no penalty, > 1.0 means penalty, 1.2 from CTRL paper
+         }
+
+     def format_instruction(self, instruction):
+         return PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
+
+     def __call__(
+         self, instruction: str, **generate_kwargs: Dict[str, Any]
+     ) -> str:
+         s = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
+         input_ids = self.tokenizer(s, return_tensors="pt").input_ids
+         input_ids = input_ids.to(self.model.device)
+         gkw = {**self.generate_kwargs, **generate_kwargs}
+         with torch.no_grad():
+             output_ids = self.model.generate(input_ids, **gkw)
+         # Slice the output_ids tensor to get only new tokens
+         new_tokens = output_ids[0, len(input_ids[0]) :]
+         output_text = self.tokenizer.decode(new_tokens, skip_special_tokens=True)
+         return output_text
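+
+ # Usage sketch (assumption, not executed by this app): the pipeline class above can be
+ # called directly once instantiated, e.g.
+ #   pipe = InstructionTextGenerationPipeline("mosaicml/mpt-7b-instruct", trust_remote_code=True)
+ #   print(pipe("Write a haiku about streaming text."))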
+ ##
+ from timeit import default_timer as timer
+ import time
+ from datetime import datetime
+
+ import json
+ import pprint
+ pp = pprint.PrettyPrinter(indent=4)
+
+
+
+ def complete(state="complete"):
+     # Print a timestamped progress message
+     print(f"\nCell {state} @ {datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')}")
+
+ complete(state='imports done')
+
+ complete(state="start generate")
+ generate = InstructionTextGenerationPipeline(
+     "mosaicml/mpt-7b-instruct",
+     torch_dtype=torch.bfloat16,
+     trust_remote_code=True,
+ )
+ stop_token_ids = generate.tokenizer.convert_tokens_to_ids(["<|endoftext|>"])
+ complete(state="Model generated")
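+ # Note (assumption): loading in torch.bfloat16 expects hardware with bfloat16 support;
+ # on CPU-only machines this 7B model may need torch_dtype=torch.float32 and will be slow.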
+
+
+ # Define a custom stopping criterion that halts generation on the end-of-text token
+ class StopOnTokens(StoppingCriteria):
+     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+         for stop_id in stop_token_ids:
+             if input_ids[0][-1] == stop_id:
+                 return True
+         return False
+
+ def process_stream(instruction, temperature, top_p, top_k, max_new_tokens):
+     # Tokenize the input
+     input_ids = generate.tokenizer(
+         generate.format_instruction(instruction), return_tensors="pt"
+     ).input_ids
+     input_ids = input_ids.to(generate.model.device)
+
+     # Initialize the streamer and stopping criteria
+     streamer = TextIteratorStreamer(
+         generate.tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+     )
+     stop = StopOnTokens()
+
+     # Fall back to greedy decoding for very low temperatures
+     if temperature < 0.1:
+         temperature = 0.0
+         do_sample = False
+     else:
+         do_sample = True
+
+     gkw = {
+         **generate.generate_kwargs,
+         **{
+             "input_ids": input_ids,
+             "max_new_tokens": max_new_tokens,
+             "temperature": temperature,
+             "do_sample": do_sample,
+             "top_p": top_p,
+             "top_k": top_k,
+             "streamer": streamer,
+             "stopping_criteria": StoppingCriteriaList([stop]),
+         },
+     }
+
+     response = ''
+
+     # Run generation in a background thread so the streamer can be consumed here
+     def generate_and_signal_complete():
+         generate.model.generate(**gkw)
+
+     t1 = Thread(target=generate_and_signal_complete)
+     t1.start()
+
+     # Accumulate streamed tokens into the final response
+     for new_text in streamer:
+         response += new_text
+
+     return response
+
+ gr.close_all()
+
+ def tester(uPrompt, max_new_tokens, temperature, top_k, top_p):
+     salutation = uPrompt
+     response = process_stream(uPrompt, temperature, top_p, top_k, max_new_tokens)
+     # Summary of the request parameters (kept for debugging; not returned)
+     results = f"{salutation} max_new_tokens={max_new_tokens}; temperature={temperature}; top_k={top_k}; top_p={top_p}; "
+     return response
+
+ demo = gr.Interface(
+     fn=tester,
+     inputs=[gr.Textbox(label="Prompt", info="Prompt", lines=3, value="Provide Prompt"),
+             gr.Slider(256, 3072, value=1024, step=256, label="Tokens"),
+             gr.Slider(0.0, 1.0, value=0.1, step=0.1, label='temperature:'),
+             gr.Slider(0, 1, value=0, step=1, label='top_k:'),
+             gr.Slider(0.0, 1.0, value=0.0, step=0.05, label='top_p:')
+             ],
+     outputs=["text"],
+ )
+ demo.launch(share=True,
+             server_name="0.0.0.0",
+             server_port=8081
+             )
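+ # Note (assumption): the explicit server_name/server_port and share=True above target a
+ # self-hosted deployment; when run as a Hugging Face Space, Gradio serves on its default
+ # port and the share link is not needed.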
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio
+ transformers
+ einops
+ torch