drakosfire committed
Commit • 1c94d3e
1 Parent(s): 0f3ebe6

Not going great, most of these are not great changes

Files changed:
- __pycache__/img2img.cpython-310.pyc +0 -0
- __pycache__/item_dict_gen.cpython-310.pyc +0 -0
- __pycache__/main.cpython-310.pyc +0 -0
- __pycache__/template_builder.cpython-310.pyc +0 -0
- __pycache__/user_input.cpython-310.pyc +0 -0
- img2img.py +48 -49
- item_dict_gen.py +29 -34
- main.py +5 -4
- template_builder.py +2 -2
- user_input.py +3 -2
__pycache__/img2img.cpython-310.pyc
CHANGED
Binary files a/__pycache__/img2img.cpython-310.pyc and b/__pycache__/img2img.cpython-310.pyc differ

__pycache__/item_dict_gen.cpython-310.pyc
CHANGED
Binary files a/__pycache__/item_dict_gen.cpython-310.pyc and b/__pycache__/item_dict_gen.cpython-310.pyc differ

__pycache__/main.cpython-310.pyc
CHANGED
Binary files a/__pycache__/main.cpython-310.pyc and b/__pycache__/main.cpython-310.pyc differ

__pycache__/template_builder.cpython-310.pyc
CHANGED
Binary files a/__pycache__/template_builder.cpython-310.pyc and b/__pycache__/template_builder.cpython-310.pyc differ

__pycache__/user_input.cpython-310.pyc
CHANGED
Binary files a/__pycache__/user_input.cpython-310.pyc and b/__pycache__/user_input.cpython-310.pyc differ
img2img.py
CHANGED
@@ -9,63 +9,62 @@ from PIL import Image
pipe = None
start_time = time.time()
torch.backends.cuda.matmul.allow_tf32 = True
-model_path =
-lora_path = "
-detail_lora_path = "
-mimic_lora_path = "./models/stable-diffusion/Loras/EnvyMimicXL01.safetensors"
+model_path = "/media/drakosfire/Shared/models/stable-diffusion/card-generator-v1/card-generator-v1.safetensors"
+lora_path = "/media/drakosfire/Shared/models/stable-diffusion/card-generator-v1/blank-card-template-5.safetensors"
+detail_lora_path = "/media/drakosfire/Shared/models/stable-diffusion/card-generator-v1/add-detail-xl.safetensors"
temp_image_path = "./image_temp/"
card_pre_prompt = " blank magic card,high resolution, detailed intricate high quality border, textbox, high quality detailed magnum opus drawing of a "
negative_prompts = "text, words, numbers, letters"
image_list = []

-def
-
-
-
-
-
-
-
-
-                variant="fp16").to("cuda")
-    # Load LoRAs for controlling image
-    #pipe.load_lora_weights(lora_path, weight_name = "blank-card-template-5.safetensors",adapter_name = 'blank-card-template')
-    pipe.load_lora_weights(detail_lora_path, weight_name = "add-detail-xl.safetensors", adapter_name = "add-detail-xl")
-
-    # If mimic keyword has been detected, load the mimic LoRA and set adapter values
-    if mimic:
-        pipe.load_lora_weights(mimic_lora_path, weight_name = "EnvyMimicXL01.safetensors", adapter_name = "EnvyMimicXL")
-        pipe.set_adapters(['blank-card-template', "add-detail-xl", "EnvyMimicXL"], adapter_weights = [0.9,0.9,1.0])
-    else :
-        pipe.set_adapters([ "add-detail-xl"], adapter_weights = [0.9])
-    pipe.enable_vae_slicing()
-    return pipe, prompt
-
-def preview_and_generate_image(x,pipe, prompt, user_input_template, item):
-    img_start = time.time()
-    image = pipe(prompt=prompt,
-                 strength = .9,
-                 guidance_scale = 5,
-                 image= user_input_template,
-                 negative_promt = negative_prompts,
-                 num_inference_steps=40,
-                 height = 1024, width = 768).images[0]
-
-    image = image.save(temp_image_path+str(x) + f"{item}.png")
-    output_image_path = temp_image_path+str(x) + f"{item}.png"
-    img_time = time.time() - img_start
-    img_its = 50/img_time
-    print(f"image gen time = {img_time} and {img_its} it/s")
-
-
-
-
-
-
+class img_generator():
+    def load_img_gen(prompt, item, mimic = None):
+        prompt = card_pre_prompt + item + ' ' + prompt
+        print(prompt)
+
+
+        pipe = StableDiffusionXLImg2ImgPipeline.from_single_file(model_path,
+                                                                 custom_pipeline="low_stable_diffusion",
+                                                                 torch_dtype=torch.float16,
+                                                                 variant="fp16").to("cuda")
+        # Load LoRAs for controlling image
+        #pipe.load_lora_weights(lora_path, weight_name = "blank-card-template-5.safetensors",adapter_name = 'blank-card-template')
+        pipe.load_lora_weights(detail_lora_path, weight_name = "add-detail-xl.safetensors", adapter_name = "add-detail-xl")
+
+        # If mimic keyword has been detected, load the mimic LoRA and set adapter values
+        if mimic:
+            print("MIMIC!")
+            pipe.load_lora_weights(mimic_lora_path, weight_name = "EnvyMimicXL01.safetensors", adapter_name = "EnvyMimicXL")
+            pipe.set_adapters(['blank-card-template', "add-detail-xl", "EnvyMimicXL"], adapter_weights = [0.9,0.9,1.0])
+        else :
+            pipe.set_adapters([ "add-detail-xl"], adapter_weights = [0.9])
+        pipe.enable_vae_slicing()
+        return pipe, prompt

+    def preview_and_generate_image(x,pipe, prompt, user_input_template, item):
+        img_start = time.time()
+        image = pipe(prompt=prompt,
+                     strength = .9,
+                     guidance_scale = 5,
+                     image= user_input_template,
+                     negative_prompt = negative_prompts,
+                     num_inference_steps=40,
+                     height = 1024, width = 768).images[0]
+
+        image = image.save(temp_image_path+str(x) + f"{item}.png")
+        output_image_path = temp_image_path+str(x) + f"{item}.png"
+        img_time = time.time() - img_start
+        img_its = 50/img_time
+        print(f"image gen time = {img_time} and {img_its} it/s")
+
+        # Delete the image variable to keep VRAM open to load the LLM
+        del image
+        print(f"Memory after del {torch.cuda.memory_allocated()}")
+        print(image_list)
+        total_time = time.time() - start_time
+        print(total_time)

+        return output_image_path

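Note: the new preview_and_generate_image only deletes the generated image; the SDXL pipeline returned by load_img_gen keeps its weights on the GPU. A minimal sketch of how that pipeline could be released before loading the LLM, following the same gc.collect()/empty_cache() pattern the commit uses in item_dict_gen.py (the helper name free_pipeline_vram is illustrative, not part of this commit):

import gc
import torch

def free_pipeline_vram(pipe):
    """Move the SDXL pipeline off the GPU and clear PyTorch's CUDA cache."""
    pipe.to("cpu")              # release the GPU copies of the weights
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()   # return cached VRAM to the driver
    print(f"Memory after release: {torch.cuda.memory_allocated()}")
    return None                 # caller rebinds: pipe = free_pipeline_vram(pipe)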
item_dict_gen.py
CHANGED
@@ -1,27 +1,25 @@
-
+import replicate
import ast
import gc
import torch
+import os

-
+api_key = os.getenv('REPLICATE_API_TOKEN')
+
+
+model_path = "meta/meta-llama-3-70b-instruct"
def load_llm(user_input):
-
-
-
-
-
-
-    return llm(
-        f"GPT4 User: {prompt_instructions} the item is {user_input}: <|end_of_turn|>GPT4 Assistant:", # Prompt
-        max_tokens=768, # Generate up to 512 tokens
-        stop=["</s>"], # Example stop token - not necessarily correct for this specific model! Please check before using.
-        echo=False # Whether to echo the prompt
-    )
+    input = {"prompt" : f" {prompt_instructions} the item is {user_input}"}
+    output = replicate.run(model_path,
+                           input=input
+                           )
+    return output
+

def call_llm_and_cleanup(user_input):
    # Call the LLM and store its output
-    llm_output = load_llm(user_input)
-    print(llm_output
+    llm_output = "".join(load_llm(user_input))
+    print("".join(llm_output))
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache() # Clear VRAM allocated by PyTorch
@@ -55,11 +53,24 @@ def convert_to_dict(string):

# Instructions past 4 are not time tested and may need to be removed.
### Meta prompted :
-prompt_instructions = """ **Purpose**: Generate a structured inventory entry for a specific item as a hashmap.
+prompt_instructions = """ **Purpose**: ONLY Generate a structured inventory entry for a specific item as a hashmap. Do NOT reply with anything other than a hashmap.

**Instructions**:
1. Replace `{item}` with the name of the user item, DO NOT CHANGE THE USER ITEM NAME enclosed in single quotes (e.g., `'Magic Wand'`).
-2.
+2. **Dictionary Structure**:
+
+{"{item}": {
+'Name': "{item name}",
+'Type': '{item type}',
+'Rarity': '{item rarity},
+'Value': '{item value}',
+'Properties': ["{property1}", "{property2}", ...],
+'Damage': '{damage formula} , '{damage type}',
+'Weight': '{weight}',
+'Description': "{item description}",
+'Quote': "{item quote}",
+'SD Prompt': "{special description for the item}"
+} }
3. Weapons MUST have a key 'Damage'
4. The description should be brief and puncy, or concise and thoughtful.
5. The quote and SD Prompt MUST be inside double quotations ie " ".
@@ -111,22 +122,6 @@ Imitative Predators. Mimics can alter their outward texture to resemble wood, st
When it changes shape, a mimic excretes an adhesive that helps it seize prey and weapons that touch it. The adhesive is absorbed when the mimic assumes its amorphous form and on parts the mimic uses to move itself.
Cunning Hunters. Mimics live and hunt alone, though they occasionally share their feeding grounds with other creatures. Although most mimics have only predatory intelligence, a rare few evolve greater cunning and the ability to carry on simple conversations in Common or Undercommon. Such mimics might allow safe passage through their domains or provide useful information in exchange for food.

-11.
-**Format Example**:
-- **Dictionary Structure**:
-
-{"{item}": {
-'Name': "{item name}",
-'Type': '{item type}',
-'Rarity': '{item rarity},
-'Value': '{item value}',
-'Properties': ["{property1}", "{property2}", ...],
-'Damage': '{damage formula} , '{damage type}',
-'Weight': '{weight}',
-'Description': "{item description}",
-'Quote': "{item quote}",
-'SD Prompt': "{special description for the item}"
-} }

- **Input Placeholder**:
- "{item}": Replace with the item name, ensuring it's wrapped in single quotes.
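Note: the new Replicate path can be exercised on its own roughly as below. This is a minimal sketch mirroring the diff, assuming REPLICATE_API_TOKEN is set in the environment (the client reads it itself) and that prompt_instructions is supplied by the caller; replicate.run streams a language model's answer as text chunks, which is why call_llm_and_cleanup joins them:

import replicate

model_path = "meta/meta-llama-3-70b-instruct"

def run_item_prompt(user_input, prompt_instructions):
    # replicate.run returns the output as an iterator of string chunks for
    # language models, so the chunks are joined into one reply before parsing,
    # just like "".join(load_llm(user_input)) in the diff above.
    output = replicate.run(
        model_path,
        input={"prompt": f"{prompt_instructions} the item is {user_input}"},
    )
    return "".join(output)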
main.py
CHANGED
@@ -1,5 +1,6 @@

-import img2img
+import img2img
+from img2img import img_generator

import card_generator as card
import utilities as u
import ctypes
@@ -111,11 +112,11 @@ with gr.Blocks() as demo:
    # Called when pressing button to generate image, updates gallery by returning the list of image URLs
    def generate_image_update_gallery(num_img, sd_prompt,item_name, built_template):
        delete_temp_images()
-        print(type(built_template))
        image_list = []
-
+        image_generator = img_generator()
+        img_gen, prompt = image_generator.load_img_gen(sd_prompt, item_name)
        for x in range(num_img):
-            preview =
+            preview = image_generator.preview_and_generate_image(x,img_gen, prompt, built_template, item_name)
            image_list.append(preview)
            yield image_list
        #generate_gallery.change(image_list)
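Note: the yield inside generate_image_update_gallery is what makes the gallery fill in one image at a time; Gradio treats a generator event handler as a streaming output and re-renders after every yield. A standalone sketch of that pattern (component names and the placeholder images are illustrative, not taken from main.py):

import gradio as gr
from PIL import Image

def stream_gallery(num_img):
    image_list = []
    for x in range(int(num_img)):
        # Stand-in for preview_and_generate_image: any PIL image or file path works here.
        image_list.append(Image.new("RGB", (192, 256), ((40 * x) % 255, 80, 120)))
        yield image_list  # Gradio refreshes the gallery with the images finished so far

with gr.Blocks() as demo:
    num = gr.Number(value=4, label="Number of images", precision=0)
    gallery = gr.Gallery(label="Previews")
    gr.Button("Generate").click(stream_gallery, inputs=num, outputs=gallery)

# demo.launch()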
template_builder.py
CHANGED
@@ -40,8 +40,6 @@ def paste_image_and_resize(base_image,sticker, x_position, y_position,img_width,
    return base_image

def build_card_template(selected_border, selected_seed_image):
-    print(selected_seed_image)
-    print(type(selected_seed_image))
    selected_border = u.open_image_from_url(selected_border)
    if type(selected_seed_image) == str:
        print(f"String : {selected_seed_image}")
@@ -65,6 +63,8 @@ def build_card_template(selected_border, selected_seed_image):
    canvas = paste_image_and_resize(canvas, selected_seed_image,seed_x,seed_y, seed_width, seed_height)

    canvas.paste(selected_border,(0,0), mask = mask)
+    print(f"Canvas is : {canvas}")
+    print(f"Canvas is : {type(canvas)}")

    image_list.append(canvas)

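Note: canvas.paste(selected_border, (0,0), mask = mask) composites the border over the template, with the mask deciding which border pixels land on the canvas. A self-contained sketch of that PIL mechanism (sizes, colours, and file name are illustrative, not from template_builder.py):

from PIL import Image, ImageDraw

# Build a border layer with a transparent window where the seed image would show through.
border = Image.new("RGBA", (768, 1024), (20, 20, 20, 255))
ImageDraw.Draw(border).rectangle([96, 128, 672, 640], fill=(0, 0, 0, 0))

canvas = Image.new("RGB", (768, 1024), "white")
# Passing an RGBA image as the mask means its alpha channel controls the paste:
# opaque border pixels are copied, transparent ones leave the canvas untouched.
canvas.paste(border, (0, 0), mask=border)
canvas.save("template_preview.png")  # illustrative output name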
user_input.py
CHANGED
@@ -21,7 +21,8 @@ def index_image_paths(repo_name,directory_path):
    files = []
    for content_file in contents:
        if content_file.type == "file":
-
+            media_url = content_file.download_url.replace("raw.githubusercontent.com", "media.githubusercontent.com/media")
+            files.append(media_url)  # Or content_file.path for just the path

    return files

@@ -51,7 +52,7 @@ def user_pick_item(user_prompt,list_of_items):
def call_llm(user_input):
    # Process the query and get the response
    llm_call = igen.call_llm_and_cleanup(user_input)
-    response = llm_call
+    response = llm_call

    # Find the index of the phrase
    index = response.find(end_phrase)
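Note: the URL rewrite added to index_image_paths is likely needed because raw.githubusercontent.com serves only the pointer file for Git LFS-tracked binaries, while media.githubusercontent.com/media serves the actual content. A tiny illustration with a made-up repository path:

# Hypothetical download_url as returned by content_file.download_url.
raw_url = "https://raw.githubusercontent.com/some-user/some-repo/main/borders/border_1.png"

# Swap in the media host so LFS-tracked images download as real binaries.
media_url = raw_url.replace("raw.githubusercontent.com", "media.githubusercontent.com/media")
print(media_url)
# https://media.githubusercontent.com/media/some-user/some-repo/main/borders/border_1.png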