xsnames king007 committed on
Commit
8780c33
β€’
0 Parent(s):

Duplicate from king007/GPT-Prompt-Generate-2

Browse files

Co-authored-by: king007 <[email protected]>

Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +40 -0
  4. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: GPT Prompt Generate 2
3
+ emoji: πŸ‘¨πŸ»β€πŸŽ€
4
+ colorFrom: purple
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 3.16.2
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ duplicated_from: king007/GPT-Prompt-Generate-2
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Load two seq2seq "ChatGPT prompt generator" models from the Hugging Face Hub.
# Both checkpoints ship TensorFlow weights, hence from_tf=True (requires the
# `tensorflow` package listed in requirements.txt) to convert them for PyTorch.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# Model 1: merve's ChatGPT prompt generator.
tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompt-generator-v12")
model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompt-generator-v12", from_tf=True)
# Model 2: Kaludi's ChatGPT/GPT-4 prompt generator (BART-large fine-tune).
tokenizer2 = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
model2 = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)

def generate(prompt, max_new_tokens):
    """Expand *prompt* into a ChatGPT prompt using model 1 (merve/chatgpt-prompt-generator-v12).

    max_new_tokens may arrive as a string from the UI; it is coerced to int.
    Returns the first (only) decoded sequence.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    budget = int(max_new_tokens)
    out_ids = model.generate(encoded["input_ids"], max_new_tokens=budget)
    decoded = tokenizer.batch_decode(out_ids, skip_special_tokens=True)
    return decoded[0]

def generate2(prompt, max_new_tokens):
    """Expand *prompt* into a ChatGPT prompt using model 2 (Kaludi's BART fine-tune).

    Mirrors generate() but routes through tokenizer2/model2.
    """
    encoded = tokenizer2(prompt, return_tensors="pt")
    budget = int(max_new_tokens)
    out_ids = model2.generate(encoded["input_ids"], max_new_tokens=budget)
    decoded = tokenizer2.batch_decode(out_ids, skip_special_tokens=True)
    return decoded[0]

def generate2_test(prompt):
    """Manual-testing helper: generate2() with a fixed budget of 150 new tokens."""
    encoded = tokenizer2(prompt, return_tensors="pt")
    out_ids = model2.generate(encoded["input_ids"], max_new_tokens=150)
    decoded = tokenizer2.batch_decode(out_ids, skip_special_tokens=True)
    return decoded[0]

def generate_prompt(aitype, prompt, max_new_tokens):
    """Dispatch prompt generation to one of the two loaded models.

    aitype: "1" selects model 1 (merve), "2" selects model 2 (Kaludi).
            Comes from a free-text Textbox, so surrounding whitespace is tolerated.
    prompt: persona to expand, e.g. "photographer".
    max_new_tokens: generation budget (string or int).

    Raises ValueError on any other aitype instead of silently returning None,
    so the UI shows an actionable error rather than an empty box.
    """
    choice = str(aitype).strip()
    if choice == '1':
        return generate(prompt, max_new_tokens)
    elif choice == '2':
        return generate2(prompt, max_new_tokens)
    raise ValueError(f"unknown aitype {aitype!r}; expected '1' or '2'")
#
# --- Gradio UI --------------------------------------------------------------
# Three text inputs: which model to use ("1" or "2"), the persona to expand
# into a prompt, and the generation budget in new tokens.
input_aitype = gr.Textbox(label="Model (1 or 2)", value="2")  # was mislabelled with the persona caption
input_prompt = gr.Textbox(label="Input a persona, e.g. photographer", value="photographer")
input_maxtokens = gr.Textbox(label="max tokens", value="150")
output_component = gr.Textbox(label="Prompt")
# Each example row must supply one value per input component (3 inputs here);
# single-value rows would not populate the interface correctly.
examples = [["2", "photographer", "150"], ["2", "developer", "150"]]
description = ""
gr.Interface(
    generate_prompt,
    inputs=[input_aitype, input_prompt, input_maxtokens],
    outputs=output_component,
    examples=examples,
    title="👨🏻‍🎤 ChatGPT Prompt Generator v12 👨🏻‍🎤",
    description=description,
).launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ tensorflow
2
+ transformers
3
+ torch