Update README.md
README.md (CHANGED)
@@ -8,12 +8,35 @@ pipeline_tag: text-generation
 
 ### How to use
 ```
-import gradio as gr
 import pickle
 import random
 import numpy as np
 
+import os
+import wget
+from zipfile import ZipFile
+
+
+def download_model(force = False):
+    if force == True: print('downloading model file size is 108 MB so it may take some time to complete...')
+    try:
+        url = "https://huggingface.co/thefcraft/prompt-generator-stable-diffusion/resolve/main/models.pickle.zip"
+        if force == True:
+            with open("models.pickle.zip", 'w'): pass
+            wget.download(url, "models.pickle.zip")
+        if not os.path.exists('models.pickle.zip'): wget.download(url, "models.pickle.zip")
+        print('Download zip file now extracting model')
+        with ZipFile("models.pickle.zip", 'r') as zObject: zObject.extractall()
+        print('extracted model .. now all done')
+        return True
+    except:
+        if force == False: return download_model(force=True)
+        print('Something went wrong\ndownload model via link: `https://huggingface.co/thefcraft/prompt-generator-stable-diffusion/tree/main`')
+try: os.chdir(os.path.abspath(os.path.dirname(__file__)))
+except: pass
+if not os.path.exists('models.pickle'): download_model()
+
 with open('models.pickle', 'rb')as f:
     models = pickle.load(f)
 
 LORA_TOKEN = ''#'<|>LORA_TOKEN<|>'
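The download step above relies on the third-party `wget` package (installable with `pip install wget`). If that dependency is unwanted, a rough standard-library-only sketch of the same download-and-extract step could look like the following; it reuses the same URL and file names as above and is an illustration, not part of the repository code:

```
# Standard-library sketch of the download/extract step shown above.
# Assumption: the same models.pickle.zip archive and URL as in the README snippet.
import os
import urllib.request
from zipfile import ZipFile

URL = "https://huggingface.co/thefcraft/prompt-generator-stable-diffusion/resolve/main/models.pickle.zip"

if not os.path.exists('models.pickle'):
    if not os.path.exists('models.pickle.zip'):
        urllib.request.urlretrieve(URL, 'models.pickle.zip')  # ~108 MB download
    with ZipFile('models.pickle.zip', 'r') as zObject:
        zObject.extractall()  # leaves models.pickle in the working directory
```

The practical difference is that `wget.download` prints a progress bar, which is convenient for a ~108 MB file, while `urllib.request.urlretrieve` downloads silently.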
@@ -45,7 +68,6 @@ def generateText(model, minLen=100, size=5):
         next_prediction = sample_next(ctx,model,k)
         sentence += f", {next_prediction}"
         ctx = ', '.join(sentence.split(', ')[-k:])
-
         # if sentence.count('\n')>size: break
         if '\n' in sentence: break
     sentence = sentence.replace(NOT_SPLIT_TOKEN, ', ')
@@ -65,36 +87,18 @@ def generateText(model, minLen=100, size=5):
         output.append(prompt)
 
     return output
+if __name__ == "__main__":
+    for model in models: # models = [(model, neg_model), (nsfw, neg_nsfw), (sfw, neg_sfw)]
+        text = generateText(model[0], minLen=300, size=5)
+        text_neg = generateText(model[1], minLen=300, size=5)
 
-
-
-
-
-
-
-
-    prompt = generateText(model[0], minLen=minLen, size=1)[0]
-    output+=f"PROMPT: {prompt}\n\n"
-    if negative:
-        negative_prompt = generateText(model[1], minLen=minLen, size=5)[0]
-        output+=f"NEGATIVE PROMPT: {negative_prompt}\n"
-    output+="----------------------------------------------------------------"
-    output+="\n\n\n"
-
-    return output[:-3]
-
+        # print('\n'.join(text))
+        for i in range(len(text)):
+            print(text[i])
+            # print('negativePrompt:')
+            print(text_neg[i])
+            print('----------------------------------------------------------------')
+        print('********************************************************************************************************************************************************')
 
-ui = gr.Interface(
-    sentence_builder,
-    [
-        gr.Slider(1, 10, value=4, label="Count", info="Choose between 1 and 10", step=1),
-        gr.Slider(100, 1000, value=300, label="minLen", info="Choose between 100 and 1000", step=50),
-        gr.Radio(["NSFW", "SFW", "BOTH"], label="TYPE", info="NSFW stands for NOT SAFE FOR WORK, so choose any one you want?"),
-        gr.Checkbox(label="negitive Prompt", info="Do you want to generate negative prompt as well as prompt?"),
-    ],
-    "text"
-)
 
-if __name__ == "__main__":
-    ui.launch()
 ```
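For readers who want to reuse the updated snippet programmatically rather than run its `__main__` block, a minimal usage sketch follows. It assumes the code above has been saved locally as `prompt_generator.py` (a hypothetical file name) and that `models.pickle` is already present; the tuple unpacking follows the `models = [(model, neg_model), (nsfw, neg_nsfw), (sfw, neg_sfw)]` comment in the diff:

```
# Minimal usage sketch; prompt_generator.py is a hypothetical local copy of the
# README code above. Importing it runs the module-level download/load logic.
from prompt_generator import models, generateText

general, nsfw, sfw = models  # order taken from the comment in the __main__ block

prompt = generateText(sfw[0], minLen=300, size=1)[0]           # one SFW prompt
negative_prompt = generateText(sfw[1], minLen=300, size=1)[0]  # matching negative prompt

print("PROMPT:", prompt)
print("NEGATIVE PROMPT:", negative_prompt)
```

Here `size` appears to control how many prompts `generateText` returns and `minLen` the minimum length of each, so `size=1` yields a single candidate per model.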