import os
import pickle
import random

import gradio as gr
import numpy as np
import requests
from tqdm import tqdm

url = "https://huggingface.co/thefcraft/prompt-generator-stable-diffusion/resolve/main/models.pickle"

# Download the pre-built models once and cache them next to the script.
if not os.path.exists('models.pickle'):
    response = requests.get(url, stream=True)
    with open('models.pickle', "wb") as handle:
        # Stream the file in 8 KiB chunks (iter_content() defaults to 1 byte per chunk).
        for data in tqdm(response.iter_content(chunk_size=8192)):
            handle.write(data)

with open('models.pickle', 'rb') as f:
    models = pickle.load(f)
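# Inferred from sentence_builder below: `models` holds one entry per prompt type
# (index 0: BOTH, 1: NSFW, 2: SFW), and each entry pairs a prompt model at [0]
# with a negative-prompt model at [1].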

LORA_TOKEN = ''  # placeholder, not used below
NOT_SPLIT_TOKEN = '<|>NOT_SPLIT_TOKEN<|>'  # stands in for commas that belong inside a single tag

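# Each model is a k-th order Markov chain over comma-separated tags: it maps a
# context of the previous k tags to a probability distribution over the next tag.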
def sample_next(ctx: str, model, k):
    # Key the chain on the last k comma-separated tags of the context.
    ctx = ', '.join(ctx.split(', ')[-k:])
    if model.get(ctx) is None:
        return " "
    possible_chars = list(model[ctx].keys())
    # Values are assumed to already be normalized probabilities
    # (np.random.choice requires p to sum to 1).
    possible_values = list(model[ctx].values())
    return np.random.choice(possible_chars, p=possible_values)

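# Build a prompt by walking the chain from a random starting context until the
# sampled text contains a newline (apparently the end-of-prompt marker in the
# training data), retrying whenever the result is shorter than minLen.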
def generateText(model, minLen=100, size=5):
    keys = list(model.keys())
    starting_sent = random.choice(keys)
    k = len(random.choice(keys).split(', '))  # chain order, taken from a key's length (keys are assumed to share it)

    sentence = starting_sent
    ctx = ', '.join(starting_sent.split(', ')[-k:])

    while True:
        next_prediction = sample_next(ctx, model, k)
        sentence += f", {next_prediction}"
        ctx = ', '.join(sentence.split(', ')[-k:])
        if '\n' in sentence:
            break
    sentence = sentence.replace(NOT_SPLIT_TOKEN, ', ')

    prompt = sentence.split('\n')[0]
    if len(prompt) < minLen:
        # Too short: sample a fresh prompt instead.
        prompt = generateText(model, minLen, size=1)[0]

    size = size - 1
    if size == 0:
        return [prompt]
    # Keep the prompt generated above and sample the remaining ones.
    output = [prompt]
    for i in range(size):
        output.append(generateText(model, minLen, size=1)[0])
    return output

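# Gradio callback: maps the selected type to a model index, then formats
# `quantity` prompts (optionally with negative prompts) as a single text block.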
def sentence_builder(quantity, Type, negative):
    if Type == "NSFW":
        idx = 1
    elif Type == "SFW":
        idx = 2
    else:  # "BOTH"
        idx = 0
    model = models[idx]
    output = ""
    for i in range(quantity):
        prompt = generateText(model[0], minLen=300, size=1)[0]
        output += f"PROMPT: {prompt}\n\n"
        if negative:
            negative_prompt = generateText(model[1], minLen=300, size=1)[0]
            output += f"NEGATIVE PROMPT: {negative_prompt}\n"
        output += "----------------------------------------------------------------"
        output += "\n\n\n"
    return output[:-3]  # drop the trailing blank lines


ui = gr.Interface(
    sentence_builder,
    [
        gr.Slider(1, 10, value=4, label="Count", info="Choose between 1 and 10", step=1),
        gr.Radio(["NSFW", "SFW", "BOTH"], label="TYPE", info="NSFW stands for Not Safe For Work; pick which kind of prompts to generate."),
        gr.Checkbox(label="Negative prompt", info="Also generate a negative prompt alongside each prompt?"),
    ],
    "text",
)

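# Launching locally serves the UI at Gradio's default address (http://127.0.0.1:7860);
# pass share=True to ui.launch() below for a temporary public link.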
if __name__ == "__main__":
    ui.launch()