gokaygokay
committed on
Commit
•
695f28c
1
Parent(s):
7c1d475
random seed
Browse files- llm_inference.py +8 -2
llm_inference.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
import os
|
|
|
2 |
from groq import Groq
|
3 |
from openai import OpenAI
|
4 |
|
@@ -22,8 +23,6 @@ class LLMInferenceNode:
|
|
22 |
"""
|
23 |
Generates a prompt based on the provided seed, prompt type, and custom input.
|
24 |
"""
|
25 |
-
import random
|
26 |
-
|
27 |
random.seed(dynamic_seed)
|
28 |
if custom_input and custom_input.strip():
|
29 |
prompt = custom_input
|
@@ -176,6 +175,10 @@ Your output is only the caption itself, no comments or extra formatting. The cap
|
|
176 |
else:
|
177 |
user_message = f"{base_prompt}\nDescription: {input_text}"
|
178 |
|
|
|
|
|
|
|
|
|
179 |
# Select the appropriate provider
|
180 |
if provider == "Hugging Face":
|
181 |
response = self.huggingface_client.chat.completions.create(
|
@@ -187,6 +190,7 @@ Your output is only the caption itself, no comments or extra formatting. The cap
|
|
187 |
{"role": "system", "content": system_message},
|
188 |
{"role": "user", "content": user_message},
|
189 |
],
|
|
|
190 |
)
|
191 |
output = response.choices[0].message.content.strip()
|
192 |
|
@@ -199,6 +203,7 @@ Your output is only the caption itself, no comments or extra formatting. The cap
|
|
199 |
{"role": "system", "content": system_message},
|
200 |
{"role": "user", "content": user_message},
|
201 |
],
|
|
|
202 |
)
|
203 |
output = response.choices[0].message.content.strip()
|
204 |
|
@@ -211,6 +216,7 @@ Your output is only the caption itself, no comments or extra formatting. The cap
|
|
211 |
{"role": "system", "content": system_message},
|
212 |
{"role": "user", "content": user_message},
|
213 |
],
|
|
|
214 |
)
|
215 |
output = response.choices[0].message.content.strip()
|
216 |
|
|
|
1 |
import os
|
2 |
+
import random # Import the random module
|
3 |
from groq import Groq
|
4 |
from openai import OpenAI
|
5 |
|
|
|
23 |
"""
|
24 |
Generates a prompt based on the provided seed, prompt type, and custom input.
|
25 |
"""
|
|
|
|
|
26 |
random.seed(dynamic_seed)
|
27 |
if custom_input and custom_input.strip():
|
28 |
prompt = custom_input
|
|
|
175 |
else:
|
176 |
user_message = f"{base_prompt}\nDescription: {input_text}"
|
177 |
|
178 |
+
# Generate a random seed
|
179 |
+
seed = random.randint(0, 10000)
|
180 |
+
print(f"Generated seed: {seed}") # Debug print
|
181 |
+
|
182 |
# Select the appropriate provider
|
183 |
if provider == "Hugging Face":
|
184 |
response = self.huggingface_client.chat.completions.create(
|
|
|
190 |
{"role": "system", "content": system_message},
|
191 |
{"role": "user", "content": user_message},
|
192 |
],
|
193 |
+
seed=seed # Pass the seed parameter
|
194 |
)
|
195 |
output = response.choices[0].message.content.strip()
|
196 |
|
|
|
203 |
{"role": "system", "content": system_message},
|
204 |
{"role": "user", "content": user_message},
|
205 |
],
|
206 |
+
seed=seed # Pass the seed parameter
|
207 |
)
|
208 |
output = response.choices[0].message.content.strip()
|
209 |
|
|
|
216 |
{"role": "system", "content": system_message},
|
217 |
{"role": "user", "content": user_message},
|
218 |
],
|
219 |
+
seed=seed # Pass the seed parameter
|
220 |
)
|
221 |
output = response.choices[0].message.content.strip()
|
222 |
|